// core/stdarch/crates/core_arch/src/aarch64/sve/generated.rs
// This code is automatically generated. DO NOT MODIFY.
//
// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file:
//
// ```
// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec
// ```
8#![allow(improper_ctypes)]
9
10#[cfg(test)]
11use stdarch_test::assert_instr;
12
13use super::*;
14use crate::core_arch::arch::aarch64::*;
15
16#[doc = "Absolute difference"]
17#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"]
18#[inline(always)]
19#[target_feature(enable = "sve")]
20#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21#[cfg_attr(test, assert_instr(fabd))]
22pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
23    unsafe extern "unadjusted" {
24        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")]
25        fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
26    }
27    unsafe { _svabd_f32_m(pg.sve_into(), op1, op2) }
28}
29#[doc = "Absolute difference"]
30#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"]
31#[inline(always)]
32#[target_feature(enable = "sve")]
33#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34#[cfg_attr(test, assert_instr(fabd))]
35pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
36    svabd_f32_m(pg, op1, svdup_n_f32(op2))
37}
38#[doc = "Absolute difference"]
39#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"]
40#[inline(always)]
41#[target_feature(enable = "sve")]
42#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
43#[cfg_attr(test, assert_instr(fabd))]
44pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
45    svabd_f32_m(pg, op1, op2)
46}
47#[doc = "Absolute difference"]
48#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"]
49#[inline(always)]
50#[target_feature(enable = "sve")]
51#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
52#[cfg_attr(test, assert_instr(fabd))]
53pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
54    svabd_f32_x(pg, op1, svdup_n_f32(op2))
55}
56#[doc = "Absolute difference"]
57#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"]
58#[inline(always)]
59#[target_feature(enable = "sve")]
60#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
61#[cfg_attr(test, assert_instr(fabd))]
62pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
63    svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
64}
65#[doc = "Absolute difference"]
66#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"]
67#[inline(always)]
68#[target_feature(enable = "sve")]
69#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
70#[cfg_attr(test, assert_instr(fabd))]
71pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
72    svabd_f32_z(pg, op1, svdup_n_f32(op2))
73}
74#[doc = "Absolute difference"]
75#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"]
76#[inline(always)]
77#[target_feature(enable = "sve")]
78#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
79#[cfg_attr(test, assert_instr(fabd))]
80pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
81    unsafe extern "unadjusted" {
82        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")]
83        fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
84    }
85    unsafe { _svabd_f64_m(pg.sve_into(), op1, op2) }
86}
87#[doc = "Absolute difference"]
88#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"]
89#[inline(always)]
90#[target_feature(enable = "sve")]
91#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
92#[cfg_attr(test, assert_instr(fabd))]
93pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
94    svabd_f64_m(pg, op1, svdup_n_f64(op2))
95}
96#[doc = "Absolute difference"]
97#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"]
98#[inline(always)]
99#[target_feature(enable = "sve")]
100#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
101#[cfg_attr(test, assert_instr(fabd))]
102pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
103    svabd_f64_m(pg, op1, op2)
104}
105#[doc = "Absolute difference"]
106#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"]
107#[inline(always)]
108#[target_feature(enable = "sve")]
109#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
110#[cfg_attr(test, assert_instr(fabd))]
111pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
112    svabd_f64_x(pg, op1, svdup_n_f64(op2))
113}
114#[doc = "Absolute difference"]
115#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"]
116#[inline(always)]
117#[target_feature(enable = "sve")]
118#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
119#[cfg_attr(test, assert_instr(fabd))]
120pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
121    svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
122}
123#[doc = "Absolute difference"]
124#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"]
125#[inline(always)]
126#[target_feature(enable = "sve")]
127#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
128#[cfg_attr(test, assert_instr(fabd))]
129pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
130    svabd_f64_z(pg, op1, svdup_n_f64(op2))
131}
132#[doc = "Absolute difference"]
133#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"]
134#[inline(always)]
135#[target_feature(enable = "sve")]
136#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
137#[cfg_attr(test, assert_instr(sabd))]
138pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
139    unsafe extern "unadjusted" {
140        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")]
141        fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
142    }
143    unsafe { _svabd_s8_m(pg, op1, op2) }
144}
145#[doc = "Absolute difference"]
146#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"]
147#[inline(always)]
148#[target_feature(enable = "sve")]
149#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
150#[cfg_attr(test, assert_instr(sabd))]
151pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
152    svabd_s8_m(pg, op1, svdup_n_s8(op2))
153}
154#[doc = "Absolute difference"]
155#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"]
156#[inline(always)]
157#[target_feature(enable = "sve")]
158#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
159#[cfg_attr(test, assert_instr(sabd))]
160pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
161    svabd_s8_m(pg, op1, op2)
162}
163#[doc = "Absolute difference"]
164#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"]
165#[inline(always)]
166#[target_feature(enable = "sve")]
167#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
168#[cfg_attr(test, assert_instr(sabd))]
169pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
170    svabd_s8_x(pg, op1, svdup_n_s8(op2))
171}
172#[doc = "Absolute difference"]
173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"]
174#[inline(always)]
175#[target_feature(enable = "sve")]
176#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
177#[cfg_attr(test, assert_instr(sabd))]
178pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
179    svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
180}
181#[doc = "Absolute difference"]
182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"]
183#[inline(always)]
184#[target_feature(enable = "sve")]
185#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
186#[cfg_attr(test, assert_instr(sabd))]
187pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
188    svabd_s8_z(pg, op1, svdup_n_s8(op2))
189}
190#[doc = "Absolute difference"]
191#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"]
192#[inline(always)]
193#[target_feature(enable = "sve")]
194#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
195#[cfg_attr(test, assert_instr(sabd))]
196pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
197    unsafe extern "unadjusted" {
198        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")]
199        fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
200    }
201    unsafe { _svabd_s16_m(pg.sve_into(), op1, op2) }
202}
203#[doc = "Absolute difference"]
204#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"]
205#[inline(always)]
206#[target_feature(enable = "sve")]
207#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
208#[cfg_attr(test, assert_instr(sabd))]
209pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
210    svabd_s16_m(pg, op1, svdup_n_s16(op2))
211}
212#[doc = "Absolute difference"]
213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"]
214#[inline(always)]
215#[target_feature(enable = "sve")]
216#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
217#[cfg_attr(test, assert_instr(sabd))]
218pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
219    svabd_s16_m(pg, op1, op2)
220}
221#[doc = "Absolute difference"]
222#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"]
223#[inline(always)]
224#[target_feature(enable = "sve")]
225#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
226#[cfg_attr(test, assert_instr(sabd))]
227pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
228    svabd_s16_x(pg, op1, svdup_n_s16(op2))
229}
230#[doc = "Absolute difference"]
231#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"]
232#[inline(always)]
233#[target_feature(enable = "sve")]
234#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
235#[cfg_attr(test, assert_instr(sabd))]
236pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
237    svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
238}
239#[doc = "Absolute difference"]
240#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"]
241#[inline(always)]
242#[target_feature(enable = "sve")]
243#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
244#[cfg_attr(test, assert_instr(sabd))]
245pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
246    svabd_s16_z(pg, op1, svdup_n_s16(op2))
247}
248#[doc = "Absolute difference"]
249#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"]
250#[inline(always)]
251#[target_feature(enable = "sve")]
252#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
253#[cfg_attr(test, assert_instr(sabd))]
254pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
255    unsafe extern "unadjusted" {
256        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")]
257        fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
258    }
259    unsafe { _svabd_s32_m(pg.sve_into(), op1, op2) }
260}
261#[doc = "Absolute difference"]
262#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"]
263#[inline(always)]
264#[target_feature(enable = "sve")]
265#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
266#[cfg_attr(test, assert_instr(sabd))]
267pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
268    svabd_s32_m(pg, op1, svdup_n_s32(op2))
269}
270#[doc = "Absolute difference"]
271#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"]
272#[inline(always)]
273#[target_feature(enable = "sve")]
274#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
275#[cfg_attr(test, assert_instr(sabd))]
276pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
277    svabd_s32_m(pg, op1, op2)
278}
279#[doc = "Absolute difference"]
280#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"]
281#[inline(always)]
282#[target_feature(enable = "sve")]
283#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
284#[cfg_attr(test, assert_instr(sabd))]
285pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
286    svabd_s32_x(pg, op1, svdup_n_s32(op2))
287}
288#[doc = "Absolute difference"]
289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"]
290#[inline(always)]
291#[target_feature(enable = "sve")]
292#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
293#[cfg_attr(test, assert_instr(sabd))]
294pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
295    svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
296}
297#[doc = "Absolute difference"]
298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"]
299#[inline(always)]
300#[target_feature(enable = "sve")]
301#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
302#[cfg_attr(test, assert_instr(sabd))]
303pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
304    svabd_s32_z(pg, op1, svdup_n_s32(op2))
305}
306#[doc = "Absolute difference"]
307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"]
308#[inline(always)]
309#[target_feature(enable = "sve")]
310#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
311#[cfg_attr(test, assert_instr(sabd))]
312pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
313    unsafe extern "unadjusted" {
314        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")]
315        fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
316    }
317    unsafe { _svabd_s64_m(pg.sve_into(), op1, op2) }
318}
319#[doc = "Absolute difference"]
320#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"]
321#[inline(always)]
322#[target_feature(enable = "sve")]
323#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
324#[cfg_attr(test, assert_instr(sabd))]
325pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
326    svabd_s64_m(pg, op1, svdup_n_s64(op2))
327}
328#[doc = "Absolute difference"]
329#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"]
330#[inline(always)]
331#[target_feature(enable = "sve")]
332#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
333#[cfg_attr(test, assert_instr(sabd))]
334pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
335    svabd_s64_m(pg, op1, op2)
336}
337#[doc = "Absolute difference"]
338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"]
339#[inline(always)]
340#[target_feature(enable = "sve")]
341#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
342#[cfg_attr(test, assert_instr(sabd))]
343pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
344    svabd_s64_x(pg, op1, svdup_n_s64(op2))
345}
346#[doc = "Absolute difference"]
347#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"]
348#[inline(always)]
349#[target_feature(enable = "sve")]
350#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
351#[cfg_attr(test, assert_instr(sabd))]
352pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
353    svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
354}
355#[doc = "Absolute difference"]
356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"]
357#[inline(always)]
358#[target_feature(enable = "sve")]
359#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
360#[cfg_attr(test, assert_instr(sabd))]
361pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
362    svabd_s64_z(pg, op1, svdup_n_s64(op2))
363}
364#[doc = "Absolute difference"]
365#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"]
366#[inline(always)]
367#[target_feature(enable = "sve")]
368#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
369#[cfg_attr(test, assert_instr(uabd))]
370pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
371    unsafe extern "unadjusted" {
372        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")]
373        fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
374    }
375    unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
376}
377#[doc = "Absolute difference"]
378#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"]
379#[inline(always)]
380#[target_feature(enable = "sve")]
381#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
382#[cfg_attr(test, assert_instr(uabd))]
383pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
384    svabd_u8_m(pg, op1, svdup_n_u8(op2))
385}
386#[doc = "Absolute difference"]
387#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"]
388#[inline(always)]
389#[target_feature(enable = "sve")]
390#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
391#[cfg_attr(test, assert_instr(uabd))]
392pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
393    svabd_u8_m(pg, op1, op2)
394}
395#[doc = "Absolute difference"]
396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"]
397#[inline(always)]
398#[target_feature(enable = "sve")]
399#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
400#[cfg_attr(test, assert_instr(uabd))]
401pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
402    svabd_u8_x(pg, op1, svdup_n_u8(op2))
403}
404#[doc = "Absolute difference"]
405#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"]
406#[inline(always)]
407#[target_feature(enable = "sve")]
408#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
409#[cfg_attr(test, assert_instr(uabd))]
410pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
411    svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
412}
413#[doc = "Absolute difference"]
414#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"]
415#[inline(always)]
416#[target_feature(enable = "sve")]
417#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
418#[cfg_attr(test, assert_instr(uabd))]
419pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
420    svabd_u8_z(pg, op1, svdup_n_u8(op2))
421}
422#[doc = "Absolute difference"]
423#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"]
424#[inline(always)]
425#[target_feature(enable = "sve")]
426#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
427#[cfg_attr(test, assert_instr(uabd))]
428pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
429    unsafe extern "unadjusted" {
430        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")]
431        fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
432    }
433    unsafe { _svabd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
434}
435#[doc = "Absolute difference"]
436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"]
437#[inline(always)]
438#[target_feature(enable = "sve")]
439#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
440#[cfg_attr(test, assert_instr(uabd))]
441pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
442    svabd_u16_m(pg, op1, svdup_n_u16(op2))
443}
444#[doc = "Absolute difference"]
445#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"]
446#[inline(always)]
447#[target_feature(enable = "sve")]
448#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
449#[cfg_attr(test, assert_instr(uabd))]
450pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
451    svabd_u16_m(pg, op1, op2)
452}
453#[doc = "Absolute difference"]
454#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"]
455#[inline(always)]
456#[target_feature(enable = "sve")]
457#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
458#[cfg_attr(test, assert_instr(uabd))]
459pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
460    svabd_u16_x(pg, op1, svdup_n_u16(op2))
461}
462#[doc = "Absolute difference"]
463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"]
464#[inline(always)]
465#[target_feature(enable = "sve")]
466#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
467#[cfg_attr(test, assert_instr(uabd))]
468pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
469    svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
470}
471#[doc = "Absolute difference"]
472#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"]
473#[inline(always)]
474#[target_feature(enable = "sve")]
475#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
476#[cfg_attr(test, assert_instr(uabd))]
477pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
478    svabd_u16_z(pg, op1, svdup_n_u16(op2))
479}
480#[doc = "Absolute difference"]
481#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"]
482#[inline(always)]
483#[target_feature(enable = "sve")]
484#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
485#[cfg_attr(test, assert_instr(uabd))]
486pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
487    unsafe extern "unadjusted" {
488        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")]
489        fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
490    }
491    unsafe { _svabd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
492}
493#[doc = "Absolute difference"]
494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"]
495#[inline(always)]
496#[target_feature(enable = "sve")]
497#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
498#[cfg_attr(test, assert_instr(uabd))]
499pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
500    svabd_u32_m(pg, op1, svdup_n_u32(op2))
501}
502#[doc = "Absolute difference"]
503#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"]
504#[inline(always)]
505#[target_feature(enable = "sve")]
506#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
507#[cfg_attr(test, assert_instr(uabd))]
508pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
509    svabd_u32_m(pg, op1, op2)
510}
511#[doc = "Absolute difference"]
512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"]
513#[inline(always)]
514#[target_feature(enable = "sve")]
515#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
516#[cfg_attr(test, assert_instr(uabd))]
517pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
518    svabd_u32_x(pg, op1, svdup_n_u32(op2))
519}
520#[doc = "Absolute difference"]
521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"]
522#[inline(always)]
523#[target_feature(enable = "sve")]
524#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
525#[cfg_attr(test, assert_instr(uabd))]
526pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
527    svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
528}
529#[doc = "Absolute difference"]
530#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"]
531#[inline(always)]
532#[target_feature(enable = "sve")]
533#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
534#[cfg_attr(test, assert_instr(uabd))]
535pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
536    svabd_u32_z(pg, op1, svdup_n_u32(op2))
537}
538#[doc = "Absolute difference"]
539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"]
540#[inline(always)]
541#[target_feature(enable = "sve")]
542#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
543#[cfg_attr(test, assert_instr(uabd))]
544pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
545    unsafe extern "unadjusted" {
546        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")]
547        fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
548    }
549    unsafe { _svabd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
550}
551#[doc = "Absolute difference"]
552#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"]
553#[inline(always)]
554#[target_feature(enable = "sve")]
555#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
556#[cfg_attr(test, assert_instr(uabd))]
557pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
558    svabd_u64_m(pg, op1, svdup_n_u64(op2))
559}
560#[doc = "Absolute difference"]
561#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"]
562#[inline(always)]
563#[target_feature(enable = "sve")]
564#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
565#[cfg_attr(test, assert_instr(uabd))]
566pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
567    svabd_u64_m(pg, op1, op2)
568}
569#[doc = "Absolute difference"]
570#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"]
571#[inline(always)]
572#[target_feature(enable = "sve")]
573#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
574#[cfg_attr(test, assert_instr(uabd))]
575pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
576    svabd_u64_x(pg, op1, svdup_n_u64(op2))
577}
578#[doc = "Absolute difference"]
579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"]
580#[inline(always)]
581#[target_feature(enable = "sve")]
582#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
583#[cfg_attr(test, assert_instr(uabd))]
584pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
585    svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
586}
587#[doc = "Absolute difference"]
588#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"]
589#[inline(always)]
590#[target_feature(enable = "sve")]
591#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
592#[cfg_attr(test, assert_instr(uabd))]
593pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
594    svabd_u64_z(pg, op1, svdup_n_u64(op2))
595}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")]
        fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: SVE is enabled via #[target_feature]; the predicate is re-typed
    // (svbool_t -> svbool4_t) to match the intrinsic's nxv4f32 lane count.
    unsafe { _svabs_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" form: any value is acceptable in inactive lanes, so `op`
    // itself is reused as the merge source for the merging form.
    svabs_f32_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing form: merge into an all-zeros vector so inactive lanes become 0.0.
    svabs_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")]
        fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: SVE enabled; predicate re-typed to svbool2_t for 64-bit lanes.
    unsafe { _svabs_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" form: reuse `op` as the merge source.
    svabs_f64_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fabs))]
pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing form: inactive lanes come from an all-zeros vector.
    svabs_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")]
        fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE enabled; the 8-bit (nxv16i8) form takes the full-width
    // predicate directly, so no re-typing is needed.
    unsafe { _svabs_s8_m(inactive, pg, op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "Don't care" form: reuse `op` as the merge source.
    svabs_s8_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    // Zeroing form: inactive lanes come from an all-zeros vector.
    svabs_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")]
        fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: SVE enabled; predicate re-typed to svbool8_t for 16-bit lanes.
    unsafe { _svabs_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "Don't care" form: reuse `op` as the merge source.
    svabs_s16_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Zeroing form: inactive lanes come from an all-zeros vector.
    svabs_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")]
        fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: SVE enabled; predicate re-typed to svbool4_t for 32-bit lanes.
    unsafe { _svabs_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "Don't care" form: reuse `op` as the merge source.
    svabs_s32_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    // Zeroing form: inactive lanes come from an all-zeros vector.
    svabs_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")]
        fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: SVE enabled; predicate re-typed to svbool2_t for 64-bit lanes.
    unsafe { _svabs_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "Don't care" form: reuse `op` as the merge source.
    svabs_s64_m(op, pg, op)
}
#[doc = "Absolute value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(abs))]
pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Zeroing form: inactive lanes come from an all-zeros vector.
    svabs_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Absolute compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")]
        fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via #[target_feature]; the governing predicate is
    // re-typed to svbool4_t going in, and the svbool4_t result is re-typed back
    // to the generic svbool_t on the way out.
    unsafe { _svacge_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Absolute compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacge_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Absolute compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")]
        fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    // SAFETY: SVE enabled; predicate re-typed in (svbool2_t) and result
    // re-typed back to svbool_t.
    unsafe { _svacge_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Absolute compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacge_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")]
        fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    // SAFETY: SVE enabled; predicate re-typed in (svbool4_t) and result
    // re-typed back to svbool_t.
    unsafe { _svacgt_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacgt_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")]
        fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    // SAFETY: SVE enabled; predicate re-typed in (svbool2_t) and result
    // re-typed back to svbool_t.
    unsafe { _svacgt_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Absolute compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacgt_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Absolute compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    // "less than or equal" is facge with the operands swapped (hence the
    // facge assert_instr above).
    svacge_f32(pg, op2, op1)
}
#[doc = "Absolute compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacle_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Absolute compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    // "less than or equal" is facge with the operands swapped.
    svacge_f64(pg, op2, op1)
}
#[doc = "Absolute compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facge))]
pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svacle_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    // "less than" is facgt with the operands swapped (hence the facgt
    // assert_instr above).
    svacgt_f32(pg, op2, op1)
}
#[doc = "Absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svaclt_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    // "less than" is facgt with the operands swapped.
    svacgt_f64(pg, op2, op1)
}
#[doc = "Absolute compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(facgt))]
pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svaclt_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")]
        fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: SVE is enabled via #[target_feature]; the predicate is re-typed
    // to svbool4_t to match the intrinsic's 32-bit lanes.
    unsafe { _svadd_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Don't care" form: any lane policy is acceptable, so the merging form
    // (inactive lanes keep op1) is used as-is.
    svadd_f32_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing form: select op1 where pg is active and 0.0 elsewhere, so the
    // merging add leaves inactive lanes zeroed.
    svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")]
        fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: SVE enabled; predicate re-typed to svbool2_t for 64-bit lanes.
    unsafe { _svadd_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "Don't care" form: delegate to the merging form.
    svadd_f64_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing form: zero the inactive lanes of op1 before the merging add.
    svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadd))]
pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then defer to the vector form.
    svadd_f64_z(pg, op1, svdup_n_f64(op2))
}
1058#[doc = "Add"]
1059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"]
1060#[inline(always)]
1061#[target_feature(enable = "sve")]
1062#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1063#[cfg_attr(test, assert_instr(add))]
1064pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
1065    unsafe extern "unadjusted" {
1066        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")]
1067        fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
1068    }
1069    unsafe { _svadd_s8_m(pg, op1, op2) }
1070}
1071#[doc = "Add"]
1072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"]
1073#[inline(always)]
1074#[target_feature(enable = "sve")]
1075#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1076#[cfg_attr(test, assert_instr(add))]
1077pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
1078    svadd_s8_m(pg, op1, svdup_n_s8(op2))
1079}
1080#[doc = "Add"]
1081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"]
1082#[inline(always)]
1083#[target_feature(enable = "sve")]
1084#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1085#[cfg_attr(test, assert_instr(add))]
1086pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
1087    svadd_s8_m(pg, op1, op2)
1088}
1089#[doc = "Add"]
1090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"]
1091#[inline(always)]
1092#[target_feature(enable = "sve")]
1093#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1094#[cfg_attr(test, assert_instr(add))]
1095pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
1096    svadd_s8_x(pg, op1, svdup_n_s8(op2))
1097}
1098#[doc = "Add"]
1099#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"]
1100#[inline(always)]
1101#[target_feature(enable = "sve")]
1102#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1103#[cfg_attr(test, assert_instr(add))]
1104pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
1105    svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
1106}
1107#[doc = "Add"]
1108#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"]
1109#[inline(always)]
1110#[target_feature(enable = "sve")]
1111#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1112#[cfg_attr(test, assert_instr(add))]
1113pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
1114    svadd_s8_z(pg, op1, svdup_n_s8(op2))
1115}
1116#[doc = "Add"]
1117#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"]
1118#[inline(always)]
1119#[target_feature(enable = "sve")]
1120#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1121#[cfg_attr(test, assert_instr(add))]
1122pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1123    unsafe extern "unadjusted" {
1124        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")]
1125        fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
1126    }
1127    unsafe { _svadd_s16_m(pg.sve_into(), op1, op2) }
1128}
1129#[doc = "Add"]
1130#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"]
1131#[inline(always)]
1132#[target_feature(enable = "sve")]
1133#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1134#[cfg_attr(test, assert_instr(add))]
1135pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1136    svadd_s16_m(pg, op1, svdup_n_s16(op2))
1137}
1138#[doc = "Add"]
1139#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"]
1140#[inline(always)]
1141#[target_feature(enable = "sve")]
1142#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1143#[cfg_attr(test, assert_instr(add))]
1144pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1145    svadd_s16_m(pg, op1, op2)
1146}
1147#[doc = "Add"]
1148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"]
1149#[inline(always)]
1150#[target_feature(enable = "sve")]
1151#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1152#[cfg_attr(test, assert_instr(add))]
1153pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1154    svadd_s16_x(pg, op1, svdup_n_s16(op2))
1155}
1156#[doc = "Add"]
1157#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"]
1158#[inline(always)]
1159#[target_feature(enable = "sve")]
1160#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1161#[cfg_attr(test, assert_instr(add))]
1162pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1163    svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
1164}
1165#[doc = "Add"]
1166#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"]
1167#[inline(always)]
1168#[target_feature(enable = "sve")]
1169#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1170#[cfg_attr(test, assert_instr(add))]
1171pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1172    svadd_s16_z(pg, op1, svdup_n_s16(op2))
1173}
1174#[doc = "Add"]
1175#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"]
1176#[inline(always)]
1177#[target_feature(enable = "sve")]
1178#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1179#[cfg_attr(test, assert_instr(add))]
1180pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
1181    unsafe extern "unadjusted" {
1182        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")]
1183        fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
1184    }
1185    unsafe { _svadd_s32_m(pg.sve_into(), op1, op2) }
1186}
1187#[doc = "Add"]
1188#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"]
1189#[inline(always)]
1190#[target_feature(enable = "sve")]
1191#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1192#[cfg_attr(test, assert_instr(add))]
1193pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
1194    svadd_s32_m(pg, op1, svdup_n_s32(op2))
1195}
1196#[doc = "Add"]
1197#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"]
1198#[inline(always)]
1199#[target_feature(enable = "sve")]
1200#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1201#[cfg_attr(test, assert_instr(add))]
1202pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
1203    svadd_s32_m(pg, op1, op2)
1204}
1205#[doc = "Add"]
1206#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"]
1207#[inline(always)]
1208#[target_feature(enable = "sve")]
1209#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1210#[cfg_attr(test, assert_instr(add))]
1211pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
1212    svadd_s32_x(pg, op1, svdup_n_s32(op2))
1213}
1214#[doc = "Add"]
1215#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"]
1216#[inline(always)]
1217#[target_feature(enable = "sve")]
1218#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1219#[cfg_attr(test, assert_instr(add))]
1220pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
1221    svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
1222}
1223#[doc = "Add"]
1224#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"]
1225#[inline(always)]
1226#[target_feature(enable = "sve")]
1227#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1228#[cfg_attr(test, assert_instr(add))]
1229pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
1230    svadd_s32_z(pg, op1, svdup_n_s32(op2))
1231}
1232#[doc = "Add"]
1233#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"]
1234#[inline(always)]
1235#[target_feature(enable = "sve")]
1236#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1237#[cfg_attr(test, assert_instr(add))]
1238pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
1239    unsafe extern "unadjusted" {
1240        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")]
1241        fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
1242    }
1243    unsafe { _svadd_s64_m(pg.sve_into(), op1, op2) }
1244}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_n_` form: broadcast the scalar operand with `svdup`, then reuse the vector variant.
pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svadd_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_x` form: implemented here by delegating to the merging (`_m`) variant.
pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svadd_s64_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svadd_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_z` (zeroing) form: `svsel` forces inactive lanes of `op1` to 0 before the merging add.
pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svadd_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// Two's-complement addition is bit-identical for signed and unsigned lanes,
// so the unsigned form reuses the signed intrinsic via reinterpreting casts.
pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_n_` form: broadcast the scalar with `svdup`, then reuse the vector variant.
pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svadd_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_x` form delegates to the merging (`_m`) variant.
pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svadd_u8_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svadd_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_z` form: zero inactive lanes of `op1` (via `svsel`) before the merging add.
pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svadd_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// Two's-complement addition is bit-identical for signed and unsigned lanes,
// so the unsigned form reuses the signed intrinsic via reinterpreting casts.
pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_n_` form: broadcast the scalar with `svdup`, then reuse the vector variant.
pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svadd_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_x` form delegates to the merging (`_m`) variant.
pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svadd_u16_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svadd_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_z` form: zero inactive lanes of `op1` (via `svsel`) before the merging add.
pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svadd_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// Two's-complement addition is bit-identical for signed and unsigned lanes,
// so the unsigned form reuses the signed intrinsic via reinterpreting casts.
pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_n_` form: broadcast the scalar with `svdup`, then reuse the vector variant.
pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svadd_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_x` form delegates to the merging (`_m`) variant.
pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svadd_u32_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svadd_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_z` form: zero inactive lanes of `op1` (via `svsel`) before the merging add.
pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svadd_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// Two's-complement addition is bit-identical for signed and unsigned lanes,
// so the unsigned form reuses the signed intrinsic via reinterpreting casts.
pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_n_` form: broadcast the scalar with `svdup`, then reuse the vector variant.
pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svadd_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_x` form delegates to the merging (`_m`) variant.
pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svadd_u64_m(pg, op1, op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svadd_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
// `_z` form: zero inactive lanes of `op1` (via `svsel`) before the merging add.
pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(add))]
pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svadd_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Add reduction (strictly-ordered)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadda))]
// Strictly-ordered FP reduction (FADDA): folds `op` into the scalar `initial`;
// the intrinsic takes the running scalar as a separate argument.
pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")]
        fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32;
    }
    // Predicate is converted to the 4-lane form matching nxv4f32.
    unsafe { _svadda_f32(pg.sve_into(), initial, op) }
}
#[doc = "Add reduction (strictly-ordered)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fadda))]
// f64 counterpart of `svadda_f32` (nxv2f64 / 2-lane predicate).
pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")]
        fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64;
    }
    unsafe { _svadda_f64(pg.sve_into(), initial, op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddv))]
// Unordered FP add reduction (FADDV) of the active lanes to a scalar.
pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")]
        fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // Predicate converted to the 4-lane form matching nxv4f32.
    unsafe { _svaddv_f32(pg.sve_into(), op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(faddv))]
// f64 counterpart of `svaddv_f32` (nxv2f64 / 2-lane predicate).
pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")]
        fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    unsafe { _svaddv_f64(pg.sve_into(), op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddv))]
// NOTE(review): links `saddv.nxv2i64` but asserts `uaddv` — for 64-bit lanes no
// widening occurs, so signed and unsigned reductions produce the same bits and
// the backend is expected to emit UADDV; presumably intentional (generated) —
// confirm against the generator spec if this ever fires.
pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")]
        fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    unsafe { _svaddv_s64(pg.sve_into(), op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddv))]
// Unsigned 64-bit reduction: intrinsic is declared in signed terms, so the
// operand/result are reinterpreted with `as_signed`/`as_unsigned`.
pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")]
        fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    unsafe { _svaddv_u64(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddv))]
// Narrow-element reductions return a widened 64-bit scalar (see the i64 return
// type of the LLVM intrinsic). b8 predicates are `svbool_t` itself, so no
// `sve_into` conversion is needed here.
pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")]
        fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64;
    }
    unsafe { _svaddv_s8(pg, op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddv))]
// 16-bit lanes: predicate converted to the 8-lane (`svbool8_t`) form.
pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")]
        fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64;
    }
    unsafe { _svaddv_s16(pg.sve_into(), op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(saddv))]
// 32-bit lanes: predicate converted to the 4-lane (`svbool4_t`) form.
pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")]
        fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64;
    }
    unsafe { _svaddv_s32(pg.sve_into(), op) }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddv))]
// Unsigned variants reinterpret through the signed LLVM signature with
// `as_signed`/`as_unsigned`.
pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")]
        fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64;
    }
    unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddv))]
pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")]
        fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64;
    }
    unsafe { _svaddv_u16(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Add reduction"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uaddv))]
pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")]
        fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64;
    }
    unsafe { _svaddv_u32(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Compute vector addresses for 8-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
// ADR address computation over 32-bit base vectors; the LLVM intrinsic is
// declared in signed terms, so `bases` is reinterpreted in and out.
pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")]
        fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t;
    }
    unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() }
}
#[doc = "Compute vector addresses for 16-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
// As above, but `adrh` takes element indices rather than byte offsets.
pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")]
        fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 32-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")]
        fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 64-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")]
        fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
    }
    unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 8-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
// Unsigned-offset/index variants: reinterpret the second operand and delegate
// to the signed-offset/index implementation.
pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t {
    unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) }
}
#[doc = "Compute vector addresses for 16-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
    unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) }
}
#[doc = "Compute vector addresses for 32-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
    unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) }
}
#[doc = "Compute vector addresses for 64-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
    unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) }
}
#[doc = "Compute vector addresses for 8-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
// 64-bit-base counterparts of the u32base ADR wrappers (nxv2i64); the LLVM
// intrinsic is declared in signed terms, so `bases` is reinterpreted in/out.
pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")]
        fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t;
    }
    unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() }
}
#[doc = "Compute vector addresses for 16-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")]
        fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 32-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")]
        fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 64-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")]
        fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
    }
    unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
}
#[doc = "Compute vector addresses for 8-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
// Unsigned-offset/index variants: reinterpret the second operand and delegate
// to the signed-offset/index implementation.
pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t {
    unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) }
}
#[doc = "Compute vector addresses for 16-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) }
}
#[doc = "Compute vector addresses for 32-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) }
}
#[doc = "Compute vector addresses for 64-bit data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(adr))]
pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
    unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) }
}
1838#[doc = "Bitwise AND"]
1839#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"]
1840#[inline(always)]
1841#[target_feature(enable = "sve")]
1842#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1843#[cfg_attr(test, assert_instr(and))]
1844pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
1845    unsafe extern "unadjusted" {
1846        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nvx16i1")]
1847        fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
1848    }
1849    unsafe { _svand_b_z(pg, op1, op2) }
1850}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
// Merging (`_m`) bitwise AND for 8-bit lanes. b8 predicates are `svbool_t`
// itself, so the predicate is passed through without `sve_into`.
pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")]
        fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svand_s8_m(pg, op1, op2) }
}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
// `_n_` form: broadcast the scalar with `svdup`, then reuse the vector variant.
pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svand_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
// `_x` form delegates to the merging (`_m`) variant.
pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svand_s8_m(pg, op1, op2)
}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svand_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
// `_z` form: zero inactive lanes of `op1` (via `svsel`) before the merging AND.
pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Bitwise AND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(and))]
pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svand_s8_z(pg, op1, svdup_n_s8(op2))
}
1909#[doc = "Bitwise AND"]
1910#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"]
1911#[inline(always)]
1912#[target_feature(enable = "sve")]
1913#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1914#[cfg_attr(test, assert_instr(and))]
1915pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1916    unsafe extern "unadjusted" {
1917        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")]
1918        fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
1919    }
1920    unsafe { _svand_s16_m(pg.sve_into(), op1, op2) }
1921}
1922#[doc = "Bitwise AND"]
1923#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"]
1924#[inline(always)]
1925#[target_feature(enable = "sve")]
1926#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1927#[cfg_attr(test, assert_instr(and))]
1928pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1929    svand_s16_m(pg, op1, svdup_n_s16(op2))
1930}
1931#[doc = "Bitwise AND"]
1932#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"]
1933#[inline(always)]
1934#[target_feature(enable = "sve")]
1935#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1936#[cfg_attr(test, assert_instr(and))]
1937pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1938    svand_s16_m(pg, op1, op2)
1939}
1940#[doc = "Bitwise AND"]
1941#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"]
1942#[inline(always)]
1943#[target_feature(enable = "sve")]
1944#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1945#[cfg_attr(test, assert_instr(and))]
1946pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1947    svand_s16_x(pg, op1, svdup_n_s16(op2))
1948}
1949#[doc = "Bitwise AND"]
1950#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"]
1951#[inline(always)]
1952#[target_feature(enable = "sve")]
1953#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1954#[cfg_attr(test, assert_instr(and))]
1955pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
1956    svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
1957}
1958#[doc = "Bitwise AND"]
1959#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"]
1960#[inline(always)]
1961#[target_feature(enable = "sve")]
1962#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1963#[cfg_attr(test, assert_instr(and))]
1964pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
1965    svand_s16_z(pg, op1, svdup_n_s16(op2))
1966}
1967#[doc = "Bitwise AND"]
1968#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"]
1969#[inline(always)]
1970#[target_feature(enable = "sve")]
1971#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1972#[cfg_attr(test, assert_instr(and))]
1973pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
1974    unsafe extern "unadjusted" {
1975        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")]
1976        fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
1977    }
1978    unsafe { _svand_s32_m(pg.sve_into(), op1, op2) }
1979}
1980#[doc = "Bitwise AND"]
1981#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"]
1982#[inline(always)]
1983#[target_feature(enable = "sve")]
1984#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1985#[cfg_attr(test, assert_instr(and))]
1986pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
1987    svand_s32_m(pg, op1, svdup_n_s32(op2))
1988}
1989#[doc = "Bitwise AND"]
1990#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"]
1991#[inline(always)]
1992#[target_feature(enable = "sve")]
1993#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
1994#[cfg_attr(test, assert_instr(and))]
1995pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
1996    svand_s32_m(pg, op1, op2)
1997}
1998#[doc = "Bitwise AND"]
1999#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"]
2000#[inline(always)]
2001#[target_feature(enable = "sve")]
2002#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2003#[cfg_attr(test, assert_instr(and))]
2004pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
2005    svand_s32_x(pg, op1, svdup_n_s32(op2))
2006}
2007#[doc = "Bitwise AND"]
2008#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"]
2009#[inline(always)]
2010#[target_feature(enable = "sve")]
2011#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2012#[cfg_attr(test, assert_instr(and))]
2013pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
2014    svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
2015}
2016#[doc = "Bitwise AND"]
2017#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"]
2018#[inline(always)]
2019#[target_feature(enable = "sve")]
2020#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2021#[cfg_attr(test, assert_instr(and))]
2022pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
2023    svand_s32_z(pg, op1, svdup_n_s32(op2))
2024}
2025#[doc = "Bitwise AND"]
2026#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"]
2027#[inline(always)]
2028#[target_feature(enable = "sve")]
2029#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2030#[cfg_attr(test, assert_instr(and))]
2031pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
2032    unsafe extern "unadjusted" {
2033        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")]
2034        fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
2035    }
2036    unsafe { _svand_s64_m(pg.sve_into(), op1, op2) }
2037}
2038#[doc = "Bitwise AND"]
2039#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"]
2040#[inline(always)]
2041#[target_feature(enable = "sve")]
2042#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2043#[cfg_attr(test, assert_instr(and))]
2044pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
2045    svand_s64_m(pg, op1, svdup_n_s64(op2))
2046}
2047#[doc = "Bitwise AND"]
2048#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"]
2049#[inline(always)]
2050#[target_feature(enable = "sve")]
2051#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2052#[cfg_attr(test, assert_instr(and))]
2053pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
2054    svand_s64_m(pg, op1, op2)
2055}
2056#[doc = "Bitwise AND"]
2057#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"]
2058#[inline(always)]
2059#[target_feature(enable = "sve")]
2060#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2061#[cfg_attr(test, assert_instr(and))]
2062pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
2063    svand_s64_x(pg, op1, svdup_n_s64(op2))
2064}
2065#[doc = "Bitwise AND"]
2066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"]
2067#[inline(always)]
2068#[target_feature(enable = "sve")]
2069#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2070#[cfg_attr(test, assert_instr(and))]
2071pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
2072    svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
2073}
2074#[doc = "Bitwise AND"]
2075#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"]
2076#[inline(always)]
2077#[target_feature(enable = "sve")]
2078#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2079#[cfg_attr(test, assert_instr(and))]
2080pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
2081    svand_s64_z(pg, op1, svdup_n_s64(op2))
2082}
2083#[doc = "Bitwise AND"]
2084#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"]
2085#[inline(always)]
2086#[target_feature(enable = "sve")]
2087#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2088#[cfg_attr(test, assert_instr(and))]
2089pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
2090    unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
2091}
2092#[doc = "Bitwise AND"]
2093#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"]
2094#[inline(always)]
2095#[target_feature(enable = "sve")]
2096#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2097#[cfg_attr(test, assert_instr(and))]
2098pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
2099    svand_u8_m(pg, op1, svdup_n_u8(op2))
2100}
2101#[doc = "Bitwise AND"]
2102#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"]
2103#[inline(always)]
2104#[target_feature(enable = "sve")]
2105#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2106#[cfg_attr(test, assert_instr(and))]
2107pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
2108    svand_u8_m(pg, op1, op2)
2109}
2110#[doc = "Bitwise AND"]
2111#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"]
2112#[inline(always)]
2113#[target_feature(enable = "sve")]
2114#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2115#[cfg_attr(test, assert_instr(and))]
2116pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
2117    svand_u8_x(pg, op1, svdup_n_u8(op2))
2118}
2119#[doc = "Bitwise AND"]
2120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"]
2121#[inline(always)]
2122#[target_feature(enable = "sve")]
2123#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2124#[cfg_attr(test, assert_instr(and))]
2125pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
2126    svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
2127}
2128#[doc = "Bitwise AND"]
2129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"]
2130#[inline(always)]
2131#[target_feature(enable = "sve")]
2132#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2133#[cfg_attr(test, assert_instr(and))]
2134pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
2135    svand_u8_z(pg, op1, svdup_n_u8(op2))
2136}
2137#[doc = "Bitwise AND"]
2138#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"]
2139#[inline(always)]
2140#[target_feature(enable = "sve")]
2141#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2142#[cfg_attr(test, assert_instr(and))]
2143pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
2144    unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
2145}
2146#[doc = "Bitwise AND"]
2147#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"]
2148#[inline(always)]
2149#[target_feature(enable = "sve")]
2150#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2151#[cfg_attr(test, assert_instr(and))]
2152pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
2153    svand_u16_m(pg, op1, svdup_n_u16(op2))
2154}
2155#[doc = "Bitwise AND"]
2156#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"]
2157#[inline(always)]
2158#[target_feature(enable = "sve")]
2159#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2160#[cfg_attr(test, assert_instr(and))]
2161pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
2162    svand_u16_m(pg, op1, op2)
2163}
2164#[doc = "Bitwise AND"]
2165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"]
2166#[inline(always)]
2167#[target_feature(enable = "sve")]
2168#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2169#[cfg_attr(test, assert_instr(and))]
2170pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
2171    svand_u16_x(pg, op1, svdup_n_u16(op2))
2172}
2173#[doc = "Bitwise AND"]
2174#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"]
2175#[inline(always)]
2176#[target_feature(enable = "sve")]
2177#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2178#[cfg_attr(test, assert_instr(and))]
2179pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
2180    svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
2181}
2182#[doc = "Bitwise AND"]
2183#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"]
2184#[inline(always)]
2185#[target_feature(enable = "sve")]
2186#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2187#[cfg_attr(test, assert_instr(and))]
2188pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
2189    svand_u16_z(pg, op1, svdup_n_u16(op2))
2190}
2191#[doc = "Bitwise AND"]
2192#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"]
2193#[inline(always)]
2194#[target_feature(enable = "sve")]
2195#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2196#[cfg_attr(test, assert_instr(and))]
2197pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
2198    unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
2199}
2200#[doc = "Bitwise AND"]
2201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"]
2202#[inline(always)]
2203#[target_feature(enable = "sve")]
2204#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2205#[cfg_attr(test, assert_instr(and))]
2206pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
2207    svand_u32_m(pg, op1, svdup_n_u32(op2))
2208}
2209#[doc = "Bitwise AND"]
2210#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"]
2211#[inline(always)]
2212#[target_feature(enable = "sve")]
2213#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2214#[cfg_attr(test, assert_instr(and))]
2215pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
2216    svand_u32_m(pg, op1, op2)
2217}
2218#[doc = "Bitwise AND"]
2219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"]
2220#[inline(always)]
2221#[target_feature(enable = "sve")]
2222#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2223#[cfg_attr(test, assert_instr(and))]
2224pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
2225    svand_u32_x(pg, op1, svdup_n_u32(op2))
2226}
2227#[doc = "Bitwise AND"]
2228#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"]
2229#[inline(always)]
2230#[target_feature(enable = "sve")]
2231#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2232#[cfg_attr(test, assert_instr(and))]
2233pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
2234    svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
2235}
2236#[doc = "Bitwise AND"]
2237#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"]
2238#[inline(always)]
2239#[target_feature(enable = "sve")]
2240#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2241#[cfg_attr(test, assert_instr(and))]
2242pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
2243    svand_u32_z(pg, op1, svdup_n_u32(op2))
2244}
2245#[doc = "Bitwise AND"]
2246#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"]
2247#[inline(always)]
2248#[target_feature(enable = "sve")]
2249#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2250#[cfg_attr(test, assert_instr(and))]
2251pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
2252    unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
2253}
2254#[doc = "Bitwise AND"]
2255#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"]
2256#[inline(always)]
2257#[target_feature(enable = "sve")]
2258#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2259#[cfg_attr(test, assert_instr(and))]
2260pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
2261    svand_u64_m(pg, op1, svdup_n_u64(op2))
2262}
2263#[doc = "Bitwise AND"]
2264#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"]
2265#[inline(always)]
2266#[target_feature(enable = "sve")]
2267#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2268#[cfg_attr(test, assert_instr(and))]
2269pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
2270    svand_u64_m(pg, op1, op2)
2271}
2272#[doc = "Bitwise AND"]
2273#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"]
2274#[inline(always)]
2275#[target_feature(enable = "sve")]
2276#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2277#[cfg_attr(test, assert_instr(and))]
2278pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
2279    svand_u64_x(pg, op1, svdup_n_u64(op2))
2280}
2281#[doc = "Bitwise AND"]
2282#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"]
2283#[inline(always)]
2284#[target_feature(enable = "sve")]
2285#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2286#[cfg_attr(test, assert_instr(and))]
2287pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
2288    svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
2289}
2290#[doc = "Bitwise AND"]
2291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"]
2292#[inline(always)]
2293#[target_feature(enable = "sve")]
2294#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2295#[cfg_attr(test, assert_instr(and))]
2296pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
2297    svand_u64_z(pg, op1, svdup_n_u64(op2))
2298}
2299#[doc = "Bitwise AND reduction to scalar"]
2300#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"]
2301#[inline(always)]
2302#[target_feature(enable = "sve")]
2303#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2304#[cfg_attr(test, assert_instr(andv))]
2305pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 {
2306    unsafe extern "unadjusted" {
2307        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")]
2308        fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8;
2309    }
2310    unsafe { _svandv_s8(pg, op) }
2311}
2312#[doc = "Bitwise AND reduction to scalar"]
2313#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"]
2314#[inline(always)]
2315#[target_feature(enable = "sve")]
2316#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2317#[cfg_attr(test, assert_instr(andv))]
2318pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 {
2319    unsafe extern "unadjusted" {
2320        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")]
2321        fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16;
2322    }
2323    unsafe { _svandv_s16(pg.sve_into(), op) }
2324}
2325#[doc = "Bitwise AND reduction to scalar"]
2326#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"]
2327#[inline(always)]
2328#[target_feature(enable = "sve")]
2329#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2330#[cfg_attr(test, assert_instr(andv))]
2331pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 {
2332    unsafe extern "unadjusted" {
2333        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")]
2334        fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32;
2335    }
2336    unsafe { _svandv_s32(pg.sve_into(), op) }
2337}
2338#[doc = "Bitwise AND reduction to scalar"]
2339#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"]
2340#[inline(always)]
2341#[target_feature(enable = "sve")]
2342#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2343#[cfg_attr(test, assert_instr(andv))]
2344pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 {
2345    unsafe extern "unadjusted" {
2346        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")]
2347        fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64;
2348    }
2349    unsafe { _svandv_s64(pg.sve_into(), op) }
2350}
2351#[doc = "Bitwise AND reduction to scalar"]
2352#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"]
2353#[inline(always)]
2354#[target_feature(enable = "sve")]
2355#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2356#[cfg_attr(test, assert_instr(andv))]
2357pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
2358    unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() }
2359}
2360#[doc = "Bitwise AND reduction to scalar"]
2361#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"]
2362#[inline(always)]
2363#[target_feature(enable = "sve")]
2364#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2365#[cfg_attr(test, assert_instr(andv))]
2366pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
2367    unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() }
2368}
2369#[doc = "Bitwise AND reduction to scalar"]
2370#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"]
2371#[inline(always)]
2372#[target_feature(enable = "sve")]
2373#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2374#[cfg_attr(test, assert_instr(andv))]
2375pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
2376    unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() }
2377}
2378#[doc = "Bitwise AND reduction to scalar"]
2379#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"]
2380#[inline(always)]
2381#[target_feature(enable = "sve")]
2382#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2383#[cfg_attr(test, assert_instr(andv))]
2384pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
2385    unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() }
2386}
2387#[doc = "Arithmetic shift right"]
2388#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"]
2389#[inline(always)]
2390#[target_feature(enable = "sve")]
2391#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2392#[cfg_attr(test, assert_instr(asr))]
2393pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
2394    unsafe extern "unadjusted" {
2395        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")]
2396        fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
2397    }
2398    unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) }
2399}
2400#[doc = "Arithmetic shift right"]
2401#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"]
2402#[inline(always)]
2403#[target_feature(enable = "sve")]
2404#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2405#[cfg_attr(test, assert_instr(asr))]
2406pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
2407    svasr_s8_m(pg, op1, svdup_n_u8(op2))
2408}
2409#[doc = "Arithmetic shift right"]
2410#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"]
2411#[inline(always)]
2412#[target_feature(enable = "sve")]
2413#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2414#[cfg_attr(test, assert_instr(asr))]
2415pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
2416    svasr_s8_m(pg, op1, op2)
2417}
2418#[doc = "Arithmetic shift right"]
2419#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"]
2420#[inline(always)]
2421#[target_feature(enable = "sve")]
2422#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2423#[cfg_attr(test, assert_instr(asr))]
2424pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
2425    svasr_s8_x(pg, op1, svdup_n_u8(op2))
2426}
2427#[doc = "Arithmetic shift right"]
2428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"]
2429#[inline(always)]
2430#[target_feature(enable = "sve")]
2431#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2432#[cfg_attr(test, assert_instr(asr))]
2433pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
2434    svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
2435}
2436#[doc = "Arithmetic shift right"]
2437#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"]
2438#[inline(always)]
2439#[target_feature(enable = "sve")]
2440#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2441#[cfg_attr(test, assert_instr(asr))]
2442pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
2443    svasr_s8_z(pg, op1, svdup_n_u8(op2))
2444}
2445#[doc = "Arithmetic shift right"]
2446#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"]
2447#[inline(always)]
2448#[target_feature(enable = "sve")]
2449#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2450#[cfg_attr(test, assert_instr(asr))]
2451pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
2452    unsafe extern "unadjusted" {
2453        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")]
2454        fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
2455    }
2456    unsafe { _svasr_s16_m(pg.sve_into(), op1, op2.as_signed()) }
2457}
2458#[doc = "Arithmetic shift right"]
2459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"]
2460#[inline(always)]
2461#[target_feature(enable = "sve")]
2462#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2463#[cfg_attr(test, assert_instr(asr))]
2464pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
2465    svasr_s16_m(pg, op1, svdup_n_u16(op2))
2466}
2467#[doc = "Arithmetic shift right"]
2468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"]
2469#[inline(always)]
2470#[target_feature(enable = "sve")]
2471#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2472#[cfg_attr(test, assert_instr(asr))]
2473pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
2474    svasr_s16_m(pg, op1, op2)
2475}
2476#[doc = "Arithmetic shift right"]
2477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"]
2478#[inline(always)]
2479#[target_feature(enable = "sve")]
2480#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2481#[cfg_attr(test, assert_instr(asr))]
2482pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
2483    svasr_s16_x(pg, op1, svdup_n_u16(op2))
2484}
2485#[doc = "Arithmetic shift right"]
2486#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"]
2487#[inline(always)]
2488#[target_feature(enable = "sve")]
2489#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2490#[cfg_attr(test, assert_instr(asr))]
2491pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
2492    svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
2493}
2494#[doc = "Arithmetic shift right"]
2495#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"]
2496#[inline(always)]
2497#[target_feature(enable = "sve")]
2498#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2499#[cfg_attr(test, assert_instr(asr))]
2500pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
2501    svasr_s16_z(pg, op1, svdup_n_u16(op2))
2502}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // Merging (_m) form: lanes where `pg` is false keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")]
        fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above. The
    // predicate is converted to the 4-lane form and the unsigned shift
    // amounts are bit-reinterpreted as the signed type LLVM expects.
    unsafe { _svasr_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // merging vector variant.
    svasr_s32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasr_s32_m(pg, op1, op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // "don't care" vector variant.
    svasr_s32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // zeroing vector variant.
    svasr_s32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // Merging (_m) form: lanes where `pg` is false keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")]
        fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above. The
    // predicate is converted to the 2-lane form and the unsigned shift
    // amounts are bit-reinterpreted as the signed type LLVM expects.
    unsafe { _svasr_s64_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // merging vector variant.
    svasr_s64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasr_s64_m(pg, op1, op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // "don't care" vector variant.
    svasr_s64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar (_n) form: broadcast the shift amount, then defer to the
    // zeroing vector variant.
    svasr_s64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    // "Wide" variant: the shift amounts come from 64-bit lanes (`op2`) while
    // the data being shifted is 8-bit. Merging (_m) form: inactive lanes keep
    // their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.asr.wide.nxv16i8"
        )]
        fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; for
    // 8-bit elements the predicate is passed through unchanged, and the
    // unsigned shift amounts are bit-reinterpreted as signed.
    unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) }
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // merging vector variant.
    svasr_wide_s8_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasr_wide_s8_m(pg, op1, op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // "don't care" vector variant.
    svasr_wide_s8_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // zeroing vector variant.
    svasr_wide_s8_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    // "Wide" variant: 64-bit shift amounts (`op2`) applied to 16-bit data.
    // Merging (_m) form: inactive lanes keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.asr.wide.nxv8i16"
        )]
        fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 8-lane form and the unsigned shift
    // amounts are bit-reinterpreted as signed.
    unsafe { _svasr_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // merging vector variant.
    svasr_wide_s16_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasr_wide_s16_m(pg, op1, op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // "don't care" vector variant.
    svasr_wide_s16_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // zeroing vector variant.
    svasr_wide_s16_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    // "Wide" variant: 64-bit shift amounts (`op2`) applied to 32-bit data.
    // Merging (_m) form: inactive lanes keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.asr.wide.nxv4i32"
        )]
        fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 4-lane form and the unsigned shift
    // amounts are bit-reinterpreted as signed.
    unsafe { _svasr_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // merging vector variant.
    svasr_wide_s32_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasr_wide_s32_m(pg, op1, op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // "don't care" vector variant.
    svasr_wide_s32_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Arithmetic shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asr))]
pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    // Scalar (_n) form: broadcast the 64-bit shift amount, then defer to the
    // zeroing vector variant.
    svasr_wide_s32_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // Compile-time check: the immediate shift must be in [1, 8] for 8-bit
    // elements, matching the instruction's encodable range.
    static_assert_range!(IMM2, 1..=8);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")]
        fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; for
    // 8-bit elements the predicate is passed through unchanged.
    unsafe { _svasrd_n_s8_m(pg, op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasrd_n_s8_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasrd_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // Compile-time check: the immediate shift must be in [1, 16] for 16-bit
    // elements, matching the instruction's encodable range.
    static_assert_range!(IMM2, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")]
        fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 8-lane form.
    unsafe { _svasrd_n_s16_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasrd_n_s16_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasrd_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // Compile-time check: the immediate shift must be in [1, 32] for 32-bit
    // elements, matching the instruction's encodable range.
    static_assert_range!(IMM2, 1..=32);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")]
        fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 4-lane form.
    unsafe { _svasrd_n_s32_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasrd_n_s32_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasrd_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // Compile-time check: the immediate shift must be in [1, 64] for 64-bit
    // elements, matching the instruction's encodable range.
    static_assert_range!(IMM2, 1..=64);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")]
        fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 2-lane form.
    unsafe { _svasrd_n_s64_m(pg.sve_into(), op1, IMM2) }
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svasrd_n_s64_m::<IMM2>(pg, op1)
}
#[doc = "Arithmetic shift right for divide by immediate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
pub fn svasrd_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svasrd_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
}
2930#[doc = "Bitwise clear"]
2931#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"]
2932#[inline(always)]
2933#[target_feature(enable = "sve")]
2934#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
2935#[cfg_attr(test, assert_instr(bic))]
2936pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
2937    unsafe extern "unadjusted" {
2938        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nvx16i1")]
2939        fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
2940    }
2941    unsafe { _svbic_b_z(pg, op1, op2) }
2942}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Merging (_m) form: lanes where `pg` is false keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")]
        fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; for
    // 8-bit elements the predicate is passed through unchanged.
    unsafe { _svbic_s8_m(pg, op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar (_n) form: broadcast `op2` to every lane, then defer to the
    // merging vector variant.
    svbic_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svbic_s8_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the "don't care"
    // vector variant.
    svbic_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the zeroing vector
    // variant.
    svbic_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Merging (_m) form: lanes where `pg` is false keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")]
        fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 8-lane form.
    unsafe { _svbic_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the merging vector
    // variant.
    svbic_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svbic_s16_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the "don't care"
    // vector variant.
    svbic_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the zeroing vector
    // variant.
    svbic_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging (_m) form: lanes where `pg` is false keep their value from `op1`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")]
        fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: thin wrapper over the LLVM SVE intrinsic declared above; the
    // predicate is converted to the 4-lane form.
    unsafe { _svbic_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the merging vector
    // variant.
    svbic_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (_x) form: inactive lanes may hold any value, so this is
    // implemented by simply reusing the merging (_m) form.
    svbic_s32_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the "don't care"
    // vector variant.
    svbic_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (_z) form: clear the inactive lanes of `op1` to 0 first, then
    // apply the merging form so those lanes come out as 0.
    svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (_n) form: broadcast `op2`, then defer to the zeroing vector
    // variant.
    svbic_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Lowers directly to the LLVM SVE intrinsic for 64-bit elements (nxv2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")]
        fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // `sve_into` re-types the full-width predicate to the 2-lane predicate
    // type the intrinsic expects.
    unsafe { _svbic_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svbic_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svbic_s64_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar operand, then defer to the vector don't-care form.
    svbic_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar operand, then defer to the vector zeroing form.
    svbic_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Bitwise ops are sign-agnostic: reinterpret to the signed variant and back.
    unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svbic_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svbic_u8_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the vector don't-care form.
    svbic_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat the scalar operand, then defer to the vector zeroing form.
    svbic_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Bitwise ops are sign-agnostic: reinterpret to the signed variant and back.
    unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svbic_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svbic_u16_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the vector don't-care form.
    svbic_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat the scalar operand, then defer to the vector zeroing form.
    svbic_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Bitwise ops are sign-agnostic: reinterpret to the signed variant and back.
    unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svbic_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svbic_u32_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the vector don't-care form.
    svbic_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat the scalar operand, then defer to the vector zeroing form.
    svbic_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Bitwise ops are sign-agnostic: reinterpret to the signed variant and back.
    unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the vector merging form.
    svbic_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svbic_u64_m(pg, op1, op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the vector don't-care form.
    svbic_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Bitwise clear"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(bic))]
pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat the scalar operand, then defer to the vector zeroing form.
    svbic_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Break after first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brka))]
pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
    // Predicate-only operation on the full 16-lane predicate (nxv16i1), so
    // svbool_t is passed through with no re-typing.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")]
        fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrka_b_m(inactive, pg, op) }
}
#[doc = "Break after first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brka))]
pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    // Zeroing variant lowers to the dedicated `.z.` LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")]
        fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrka_b_z(pg, op) }
}
#[doc = "Break before first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkb))]
pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
    // Predicate-only operation on the full 16-lane predicate; no re-typing.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")]
        fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkb_b_m(inactive, pg, op) }
}
#[doc = "Break before first true condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkb))]
pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    // Zeroing variant lowers to the dedicated `.z.` LLVM intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")]
        fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkb_b_z(pg, op) }
}
#[doc = "Propagate break to next partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkn))]
pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // Predicate-only operation; passes full 16-lane predicates straight through.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")]
        fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkn_b_z(pg, op1, op2) }
}
#[doc = "Break after first true condition, propagating from previous partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkpa))]
pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // Predicate-only operation; passes full 16-lane predicates straight through.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1"
        )]
        fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkpa_b_z(pg, op1, op2) }
}
#[doc = "Break before first true condition, propagating from previous partition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(brkpb))]
pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // Predicate-only operation; passes full 16-lane predicates straight through.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1"
        )]
        fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svbrkpb_b_z(pg, op1, op2) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f32_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    // The rotation is a compile-time constant restricted to the two values
    // FCADD supports; it is forwarded to the intrinsic as a plain i32.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")]
        fn _svcadd_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            imm_rotation: i32,
        ) -> svfloat32_t;
    }
    // `sve_into` re-types the predicate to the 4-lane form for f32 elements.
    unsafe { _svcadd_f32_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f32_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f32_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
) -> svfloat32_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    // Rotation restricted at compile time to the two values FCADD supports.
    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")]
        fn _svcadd_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            imm_rotation: i32,
        ) -> svfloat64_t;
    }
    // `sve_into` re-types the predicate to the 2-lane form for f64 elements.
    unsafe { _svcadd_f64_m(pg.sve_into(), op1, op2, IMM_ROTATION) }
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    // `_x` (don't-care) variant: reuse the merging form.
    svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2)
}
#[doc = "Complex add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
pub fn svcadd_f64_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
) -> svfloat64_t {
    // Zeroing variant: zero inactive lanes of op1 first, then merge.
    svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
    // Lowers to the CLASTA intrinsic; predicate re-typed to the 4-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")]
        fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svclasta_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
    // Lowers to the CLASTA intrinsic; predicate re-typed to the 2-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")]
        fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svclasta_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
    // 8-bit elements use the full 16-lane predicate, so no re-typing is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")]
        fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
    }
    unsafe { _svclasta_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
    // Lowers to the CLASTA intrinsic; predicate re-typed to the 8-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")]
        fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
    }
    unsafe { _svclasta_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
    // Lowers to the CLASTA intrinsic; predicate re-typed to the 4-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")]
        fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
    }
    unsafe { _svclasta_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
    // Lowers to the CLASTA intrinsic; predicate re-typed to the 2-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")]
        fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
    }
    unsafe { _svclasta_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
    // Element selection is sign-agnostic: reinterpret via the signed variant.
    unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
    // Element selection is sign-agnostic: reinterpret via the signed variant.
    unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
    // Element selection is sign-agnostic: reinterpret via the signed variant.
    unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
    // Element selection is sign-agnostic: reinterpret via the signed variant.
    unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
    // `_n` variant: scalar fallback/result; lowers to the `.n.` CLASTA
    // intrinsic with the predicate re-typed to the 4-lane form.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv4f32"
        )]
        fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
    }
    unsafe { _svclasta_n_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
    // `_n` variant: scalar fallback/result; predicate re-typed to 2 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv2f64"
        )]
        fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
    }
    unsafe { _svclasta_n_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clasta))]
pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
    // `_n` variant: scalar fallback/result; 8-bit elements use the full
    // 16-lane predicate, so no re-typing is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clasta.n.nxv16i8"
        )]
        fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
    }
    unsafe { _svclasta_n_s8(pg, fallback, data) }
}
3748#[doc = "Conditionally extract element after last"]
3749#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"]
3750#[inline(always)]
3751#[target_feature(enable = "sve")]
3752#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3753#[cfg_attr(test, assert_instr(clasta))]
3754pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
3755    unsafe extern "unadjusted" {
3756        #[cfg_attr(
3757            target_arch = "aarch64",
3758            link_name = "llvm.aarch64.sve.clasta.n.nxv8i16"
3759        )]
3760        fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
3761    }
3762    unsafe { _svclasta_n_s16(pg.sve_into(), fallback, data) }
3763}
3764#[doc = "Conditionally extract element after last"]
3765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"]
3766#[inline(always)]
3767#[target_feature(enable = "sve")]
3768#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3769#[cfg_attr(test, assert_instr(clasta))]
3770pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
3771    unsafe extern "unadjusted" {
3772        #[cfg_attr(
3773            target_arch = "aarch64",
3774            link_name = "llvm.aarch64.sve.clasta.n.nxv4i32"
3775        )]
3776        fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
3777    }
3778    unsafe { _svclasta_n_s32(pg.sve_into(), fallback, data) }
3779}
3780#[doc = "Conditionally extract element after last"]
3781#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"]
3782#[inline(always)]
3783#[target_feature(enable = "sve")]
3784#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3785#[cfg_attr(test, assert_instr(clasta))]
3786pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
3787    unsafe extern "unadjusted" {
3788        #[cfg_attr(
3789            target_arch = "aarch64",
3790            link_name = "llvm.aarch64.sve.clasta.n.nxv2i64"
3791        )]
3792        fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
3793    }
3794    unsafe { _svclasta_n_s64(pg.sve_into(), fallback, data) }
3795}
3796#[doc = "Conditionally extract element after last"]
3797#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"]
3798#[inline(always)]
3799#[target_feature(enable = "sve")]
3800#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3801#[cfg_attr(test, assert_instr(clasta))]
3802pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
3803    unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
3804}
3805#[doc = "Conditionally extract element after last"]
3806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"]
3807#[inline(always)]
3808#[target_feature(enable = "sve")]
3809#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3810#[cfg_attr(test, assert_instr(clasta))]
3811pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
3812    unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
3813}
3814#[doc = "Conditionally extract element after last"]
3815#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"]
3816#[inline(always)]
3817#[target_feature(enable = "sve")]
3818#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3819#[cfg_attr(test, assert_instr(clasta))]
3820pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
3821    unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
3822}
3823#[doc = "Conditionally extract element after last"]
3824#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"]
3825#[inline(always)]
3826#[target_feature(enable = "sve")]
3827#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
3828#[cfg_attr(test, assert_instr(clasta))]
3829pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
3830    unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
3831}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")]
        fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svclastb_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")]
        fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svclastb_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")]
        fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the byte-granular
    // predicate is passed through unchanged.
    unsafe { _svclastb_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")]
        fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 8-lane form via `sve_into()`.
    unsafe { _svclastb_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")]
        fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svclastb_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")]
        fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svclastb_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4f32"
        )]
        fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svclastb_n_f32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2f64"
        )]
        fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svclastb_n_f64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv16i8"
        )]
        fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the byte-granular
    // predicate is passed through unchanged.
    unsafe { _svclastb_n_s8(pg, fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv8i16"
        )]
        fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 8-lane form via `sve_into()`.
    unsafe { _svclastb_n_s16(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv4i32"
        )]
        fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svclastb_n_s32(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.clastb.n.nxv2i64"
        )]
        fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
    }
    // SAFETY: matches the LLVM intrinsic declared above; the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svclastb_n_s64(pg.sve_into(), fallback, data) }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Conditionally extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clastb))]
pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
    // SAFETY: delegates to the signed variant; `as_signed`/`as_unsigned` are
    // presumably same-width sign reinterpretations — verify against their impls.
    unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")]
        fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // passed through unchanged.
    unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svcls_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // _z form: a zero vector supplies the inactive lanes.
    svcls_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")]
        fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 8-lane form via `sve_into()`.
    unsafe { _svcls_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svcls_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // _z form: a zero vector supplies the inactive lanes.
    svcls_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")]
        fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svcls_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svcls_s32_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // _z form: a zero vector supplies the inactive lanes.
    svcls_s32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")]
        fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svcls_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svcls_s64_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading sign bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cls))]
pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // _z form: a zero vector supplies the inactive lanes.
    svcls_s64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")]
        fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // passed through unchanged.
    unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svclz_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")]
        fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 8-lane form via `sve_into()`.
    unsafe { _svclz_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svclz_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")]
        fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 4-lane form via `sve_into()`.
    unsafe { _svclz_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svclz_s32_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_s32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")]
        fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: matches the LLVM intrinsic declared above; `inactive` and the
    // result are sign-reinterpreted at the same width, the predicate is
    // converted to the 2-lane form via `sve_into()`.
    unsafe { _svclz_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    // SAFETY: `as_unsigned` reinterprets `op` at the same width.
    unsafe { svclz_s64_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_s64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // SAFETY: delegates to the signed variant; `as_signed` reinterprets `op`
    // at the same width (clz counts bits, so lane values are unaffected).
    unsafe { svclz_s8_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    svclz_u8_m(op, pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // SAFETY: delegates to the signed variant; `as_signed` reinterprets `op`
    // at the same width (clz counts bits, so lane values are unaffected).
    unsafe { svclz_s16_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // _x form: `op` itself is reused as the `inactive` argument, so inactive
    // lanes carry unspecified-but-valid values.
    svclz_u16_m(op, pg, op)
}
#[doc = "Count leading zero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(clz))]
pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // _z form: a zero vector supplies the inactive lanes.
    svclz_u16_m(svdup_n_u16(0), pg, op)
}
4380#[doc = "Count leading zero bits"]
4381#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"]
4382#[inline(always)]
4383#[target_feature(enable = "sve")]
4384#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4385#[cfg_attr(test, assert_instr(clz))]
4386pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
4387    unsafe { svclz_s32_m(inactive, pg, op.as_signed()) }
4388}
4389#[doc = "Count leading zero bits"]
4390#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"]
4391#[inline(always)]
4392#[target_feature(enable = "sve")]
4393#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4394#[cfg_attr(test, assert_instr(clz))]
4395pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
4396    svclz_u32_m(op, pg, op)
4397}
4398#[doc = "Count leading zero bits"]
4399#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"]
4400#[inline(always)]
4401#[target_feature(enable = "sve")]
4402#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4403#[cfg_attr(test, assert_instr(clz))]
4404pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
4405    svclz_u32_m(svdup_n_u32(0), pg, op)
4406}
4407#[doc = "Count leading zero bits"]
4408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"]
4409#[inline(always)]
4410#[target_feature(enable = "sve")]
4411#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4412#[cfg_attr(test, assert_instr(clz))]
4413pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
4414    unsafe { svclz_s64_m(inactive, pg, op.as_signed()) }
4415}
4416#[doc = "Count leading zero bits"]
4417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"]
4418#[inline(always)]
4419#[target_feature(enable = "sve")]
4420#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4421#[cfg_attr(test, assert_instr(clz))]
4422pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
4423    svclz_u64_m(op, pg, op)
4424}
4425#[doc = "Count leading zero bits"]
4426#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"]
4427#[inline(always)]
4428#[target_feature(enable = "sve")]
4429#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4430#[cfg_attr(test, assert_instr(clz))]
4431pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
4432    svclz_u64_m(svdup_n_u64(0), pg, op)
4433}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f32_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // FCMLA only encodes rotations of 0/90/180/270 degrees; reject anything
    // else at compile time.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    // Thin FFI wrapper over the LLVM SVE intrinsic; the generic svbool_t
    // predicate is converted to the 32-bit-element predicate via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")]
        fn _svcmla_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
            imm_rotation: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svcmla_f32_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f32_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // "Don't care" (_x) variant: inactive lanes are unspecified, so the
    // merging (_m) form is a valid implementation as-is.
    svcmla_f32_m::<IMM_ROTATION>(pg, op1, op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f32_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Zeroing (_z) variant: zero out the inactive lanes of the accumulator
    // (op1) with svsel before the merging operation, so inactive result lanes
    // come out as zero.
    svcmla_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_m<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Rotation must be one of the four encodable values.
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")]
        fn _svcmla_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
            imm_rotation: i32,
        ) -> svfloat64_t;
    }
    unsafe { _svcmla_f64_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) }
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_x<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // _x variant: inactive lanes are unspecified; reuse the merging form.
    svcmla_f64_m::<IMM_ROTATION>(pg, op1, op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
pub fn svcmla_f64_z<const IMM_ROTATION: i32>(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // _z variant: zero inactive lanes of the accumulator before merging.
    svcmla_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Complex multiply-add with rotate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
pub fn svcmla_lane_f32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Indexed (lane) form is unpredicated. The lane index selects a complex
    // (real, imaginary) element pair from op3; for f32 only indices 0..=1 encode.
    static_assert_range!(IMM_INDEX, 0..=1);
    static_assert!(
        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
    );
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32"
        )]
        fn _svcmla_lane_f32(
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
            imm_index: i32,
            imm_rotation: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    // Thin FFI wrapper over the LLVM SVE intrinsic. The generic svbool_t
    // predicate is converted to/from the 32-bit-element predicate type
    // (svbool4_t) with `sve_into` on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")]
        fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Scalar (_n) variant: broadcast op2 across all lanes, then reuse the
    // vector form.
    svcmpeq_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    // FFI wrapper; 64-bit elements use the svbool2_t predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")]
        fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    unsafe { _svcmpeq_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmeq))]
pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    // 8-bit elements use svbool_t directly (one predicate bit per byte), so no
    // predicate conversion is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")]
        fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    unsafe { _svcmpeq_s8(pg, op1, op2) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    // FFI wrapper; 16-bit elements use the svbool8_t predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")]
        fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpeq_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    // FFI wrapper; 32-bit elements use the svbool4_t predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")]
        fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    // FFI wrapper; 64-bit elements use the svbool2_t predicate type.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")]
        fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpeq_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // Equality is sign-agnostic: delegate to the signed form with bit-for-bit
    // reinterpretation of both operands.
    unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    // Equality is sign-agnostic: reuse the signed form via reinterpret casts.
    unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    // Equality is sign-agnostic: reuse the signed form via reinterpret casts.
    unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    // Equality is sign-agnostic: reuse the signed form via reinterpret casts.
    unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    // _n variant: broadcast the scalar, then compare vector-to-vector.
    svcmpeq_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    // Wide compare: op1's 8-bit elements are compared against op2's 64-bit
    // elements (per the _wide intrinsic contract).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8"
        )]
        fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmpeq_wide_s8(pg, op1, op2) }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    // Scalar wide variant: broadcast the 64-bit scalar and reuse the wide form.
    svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    // Wide compare of 16-bit elements against 64-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16"
        )]
        fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpeq_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    // Scalar wide variant: broadcast the 64-bit scalar and reuse the wide form.
    svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    // Wide compare of 32-bit elements against 64-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32"
        )]
        fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpeq_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpeq))]
pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    // Scalar wide variant: broadcast the 64-bit scalar and reuse the wide form.
    svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2))
}
4853#[doc = "Compare greater than or equal to"]
4854#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"]
4855#[inline(always)]
4856#[target_feature(enable = "sve")]
4857#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4858#[cfg_attr(test, assert_instr(fcmge))]
4859pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
4860    unsafe extern "unadjusted" {
4861        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")]
4862        fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
4863    }
4864    unsafe { _svcmpge_f32(pg.sve_into(), op1, op2).sve_into() }
4865}
4866#[doc = "Compare greater than or equal to"]
4867#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"]
4868#[inline(always)]
4869#[target_feature(enable = "sve")]
4870#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4871#[cfg_attr(test, assert_instr(fcmge))]
4872pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
4873    svcmpge_f32(pg, op1, svdup_n_f32(op2))
4874}
4875#[doc = "Compare greater than or equal to"]
4876#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"]
4877#[inline(always)]
4878#[target_feature(enable = "sve")]
4879#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4880#[cfg_attr(test, assert_instr(fcmge))]
4881pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
4882    unsafe extern "unadjusted" {
4883        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")]
4884        fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
4885    }
4886    unsafe { _svcmpge_f64(pg.sve_into(), op1, op2).sve_into() }
4887}
4888#[doc = "Compare greater than or equal to"]
4889#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"]
4890#[inline(always)]
4891#[target_feature(enable = "sve")]
4892#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4893#[cfg_attr(test, assert_instr(fcmge))]
4894pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
4895    svcmpge_f64(pg, op1, svdup_n_f64(op2))
4896}
4897#[doc = "Compare greater than or equal to"]
4898#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"]
4899#[inline(always)]
4900#[target_feature(enable = "sve")]
4901#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4902#[cfg_attr(test, assert_instr(cmpge))]
4903pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
4904    unsafe extern "unadjusted" {
4905        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")]
4906        fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
4907    }
4908    unsafe { _svcmpge_s8(pg, op1, op2) }
4909}
4910#[doc = "Compare greater than or equal to"]
4911#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"]
4912#[inline(always)]
4913#[target_feature(enable = "sve")]
4914#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4915#[cfg_attr(test, assert_instr(cmpge))]
4916pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
4917    svcmpge_s8(pg, op1, svdup_n_s8(op2))
4918}
4919#[doc = "Compare greater than or equal to"]
4920#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"]
4921#[inline(always)]
4922#[target_feature(enable = "sve")]
4923#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4924#[cfg_attr(test, assert_instr(cmpge))]
4925pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
4926    unsafe extern "unadjusted" {
4927        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")]
4928        fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
4929    }
4930    unsafe { _svcmpge_s16(pg.sve_into(), op1, op2).sve_into() }
4931}
4932#[doc = "Compare greater than or equal to"]
4933#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"]
4934#[inline(always)]
4935#[target_feature(enable = "sve")]
4936#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4937#[cfg_attr(test, assert_instr(cmpge))]
4938pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
4939    svcmpge_s16(pg, op1, svdup_n_s16(op2))
4940}
4941#[doc = "Compare greater than or equal to"]
4942#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"]
4943#[inline(always)]
4944#[target_feature(enable = "sve")]
4945#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
4946#[cfg_attr(test, assert_instr(cmpge))]
4947pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
4948    unsafe extern "unadjusted" {
4949        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")]
4950        fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
4951    }
4952    unsafe { _svcmpge_s32(pg.sve_into(), op1, op2).sve_into() }
4953}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    // Vector-scalar form: splat the scalar across a vector and reuse the
    // vector-vector intrinsic.
    svcmpge_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for predicated signed >= over 64-bit lanes; it takes
        // and returns the 2-lane predicate form (svbool2_t), not svbool_t.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")]
        fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    // sve_into() converts between the byte-granular svbool_t and the
    // element-width predicate type expected/produced by the intrinsic.
    unsafe { _svcmpge_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpge_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Unsigned >= maps to the CMPHS (compare higher-or-same) intrinsic.
        // The LLVM declaration is over signed vector types, so the unsigned
        // operands are bit-reinterpreted via as_signed() below.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")]
        fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // 8-bit lanes: the predicate is already byte-granular, so no sve_into()
    // conversion is needed on pg or on the result.
    unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpge_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHS over 16-bit lanes; predicate uses the 8-lane form (svbool8_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")]
        fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    // Convert predicate width in, reinterpret unsigned operands as signed
    // bits, convert the result predicate back to svbool_t.
    unsafe { _svcmpge_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpge_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHS over 32-bit lanes; predicate uses the 4-lane form (svbool4_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")]
        fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpge_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpge_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHS over 64-bit lanes; predicate uses the 2-lane form (svbool2_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")]
        fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpge_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpge_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // "Wide" variant: 8-bit first operand compared against 64-bit second
        // operand (the .wide LLVM intrinsic).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8"
        )]
        fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpge_wide_s8(pg, op1, op2) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Wide compare over 16-bit lanes; predicate uses the 8-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16"
        )]
        fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    // Convert predicate width in and out via sve_into().
    unsafe { _svcmpge_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Wide compare over 32-bit lanes; predicate uses the 4-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32"
        )]
        fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpge_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Unsigned wide >= maps to the CMPHS .wide intrinsic; the LLVM
        // declaration is over signed vectors, hence the as_signed() casts.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8"
        )]
        fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_u8(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHS wide over 16-bit lanes; predicate uses the 8-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16"
        )]
        fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpge_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_u16(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHS wide over 32-bit lanes; predicate uses the 4-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32"
        )]
        fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpge_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpge_wide_u32(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Floating-point > maps to the FCMGT intrinsic; predicate uses the
        // 4-lane form (svbool4_t) for 32-bit elements.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")]
        fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    // sve_into() converts the predicate width in and out.
    unsafe { _svcmpgt_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // FCMGT over 64-bit lanes; predicate uses the 2-lane form (svbool2_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")]
        fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    unsafe { _svcmpgt_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Signed > over 8-bit lanes (CMPGT).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")]
        fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpgt_s8(pg, op1, op2) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPGT over 16-bit lanes; predicate uses the 8-lane form (svbool8_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")]
        fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    // sve_into() converts the predicate width in and out.
    unsafe { _svcmpgt_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPGT over 32-bit lanes; predicate uses the 4-lane form (svbool4_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")]
        fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPGT over 64-bit lanes; predicate uses the 2-lane form (svbool2_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")]
        fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpgt_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Unsigned > maps to the CMPHI (compare higher) intrinsic. The LLVM
        // declaration is over signed vectors, hence the as_signed() casts.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")]
        fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHI over 16-bit lanes; predicate uses the 8-lane form (svbool8_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")]
        fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    unsafe { _svcmpgt_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHI over 32-bit lanes; predicate uses the 4-lane form (svbool4_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")]
        fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHI over 64-bit lanes; predicate uses the 2-lane form (svbool2_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")]
        fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    unsafe { _svcmpgt_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    // Vector-scalar form: splat the scalar and delegate.
    svcmpgt_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // "Wide" variant: 8-bit first operand compared against 64-bit second
        // operand (the .wide LLVM intrinsic).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8"
        )]
        fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpgt_wide_s8(pg, op1, op2) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Wide CMPGT over 16-bit lanes; predicate uses the 8-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16"
        )]
        fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    // sve_into() converts the predicate width in and out.
    unsafe { _svcmpgt_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Wide CMPGT over 32-bit lanes; predicate uses the 4-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32"
        )]
        fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // Unsigned wide > maps to the CMPHI .wide intrinsic; the LLVM
        // declaration is over signed vectors, hence the as_signed() casts.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8"
        )]
        fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    // 8-bit lanes: predicate is already byte-granular, no conversion needed.
    unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHI wide over 16-bit lanes; predicate uses the 8-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16"
        )]
        fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmpgt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        // CMPHI wide over 32-bit lanes; predicate uses the 4-lane form.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32"
        )]
        fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmpgt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare greater than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
    // Scalar form: the wide second operand is a 64-bit splat.
    svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2))
}
// `svcmple` family. There is no dedicated less-than-or-equal LLVM intrinsic
// for same-width operands: each vector form is implemented by swapping the
// operands of the matching greater-than-or-equal comparison
// (a <= b  <=>  b >= a), which is why the expected instruction is
// fcmge (float) / cmpge (signed) / cmphs (unsigned) rather than a "cmple"
// form. Each `_n_` scalar form splats the scalar with `svdup` and defers to
// its vector form.
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    svcmpge_f32(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    svcmple_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    svcmpge_f64(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmge))]
pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    svcmple_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    svcmpge_s8(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmple_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    svcmpge_s16(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmple_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    svcmpge_s32(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    svcmple_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    svcmpge_s64(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpge))]
pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmple_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    svcmpge_u8(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmple_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    svcmpge_u16(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmple_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    svcmpge_u32(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    svcmple_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    svcmpge_u64(pg, op2, op1)
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphs))]
pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    svcmple_u64(pg, op1, svdup_n_u64(op2))
}
// `svcmple_wide` family: compares each element of op1 against the 64-bit
// elements of op2, mapping directly onto the `cmple.wide` (signed) /
// `cmpls.wide` (unsigned) LLVM intrinsics — unlike plain `svcmple`, a
// dedicated instruction form exists here, so no operand swap is needed.
// For the 8-bit variants the nxv16i8 predicate type is `svbool_t` itself,
// so the predicate is passed through unchanged; the 16/32-bit variants
// convert the predicate with `sve_into`, and the unsigned variants
// reinterpret lanes with `as_signed` (both are bit-preserving casts).
// Each `_n_` scalar form splats the scalar with `svdup_n_s64`/`svdup_n_u64`
// and defers to its vector form.
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8"
        )]
        fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmple_wide_s8(pg, op1, op2) }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    svcmple_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16"
        )]
        fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmple_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    svcmple_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32"
        )]
        fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmple_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmple))]
pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    svcmple_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8"
        )]
        fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
    svcmple_wide_u8(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16"
        )]
        fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    unsafe { _svcmple_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
    svcmple_wide_u16(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32"
        )]
        fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    unsafe { _svcmple_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpls))]
pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
    svcmple_wide_u32(pg, op1, svdup_n_u64(op2))
}
// `svcmplt` family. There is no dedicated less-than LLVM intrinsic for
// same-width operands: each vector form is implemented by swapping the
// operands of the matching greater-than comparison (a < b  <=>  b > a),
// which is why the expected instruction is fcmgt (float) / cmpgt (signed) /
// cmphi (unsigned) rather than a "cmplt" form. Each `_n_` scalar form
// splats the scalar with `svdup` and defers to its vector form.
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    svcmpgt_f32(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    svcmplt_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    svcmpgt_f64(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmgt))]
pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    svcmplt_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    svcmpgt_s8(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    svcmplt_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    svcmpgt_s16(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    svcmplt_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    svcmpgt_s32(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    svcmplt_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    svcmpgt_s64(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpgt))]
pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    svcmplt_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    svcmpgt_u8(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    svcmplt_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    svcmpgt_u16(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    svcmplt_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    svcmpgt_u32(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    svcmplt_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    svcmpgt_u64(pg, op2, op1)
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmphi))]
pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    svcmplt_u64(pg, op1, svdup_n_u64(op2))
}
6103#[doc = "Compare less than"]
6104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"]
6105#[inline(always)]
6106#[target_feature(enable = "sve")]
6107#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6108#[cfg_attr(test, assert_instr(cmplt))]
6109pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
6110    unsafe extern "unadjusted" {
6111        #[cfg_attr(
6112            target_arch = "aarch64",
6113            link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8"
6114        )]
6115        fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
6116    }
6117    unsafe { _svcmplt_wide_s8(pg, op1, op2) }
6118}
6119#[doc = "Compare less than"]
6120#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"]
6121#[inline(always)]
6122#[target_feature(enable = "sve")]
6123#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6124#[cfg_attr(test, assert_instr(cmplt))]
6125pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
6126    svcmplt_wide_s8(pg, op1, svdup_n_s64(op2))
6127}
6128#[doc = "Compare less than"]
6129#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"]
6130#[inline(always)]
6131#[target_feature(enable = "sve")]
6132#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6133#[cfg_attr(test, assert_instr(cmplt))]
6134pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
6135    unsafe extern "unadjusted" {
6136        #[cfg_attr(
6137            target_arch = "aarch64",
6138            link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16"
6139        )]
6140        fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
6141    }
6142    unsafe { _svcmplt_wide_s16(pg.sve_into(), op1, op2).sve_into() }
6143}
6144#[doc = "Compare less than"]
6145#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"]
6146#[inline(always)]
6147#[target_feature(enable = "sve")]
6148#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6149#[cfg_attr(test, assert_instr(cmplt))]
6150pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
6151    svcmplt_wide_s16(pg, op1, svdup_n_s64(op2))
6152}
6153#[doc = "Compare less than"]
6154#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"]
6155#[inline(always)]
6156#[target_feature(enable = "sve")]
6157#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6158#[cfg_attr(test, assert_instr(cmplt))]
6159pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
6160    unsafe extern "unadjusted" {
6161        #[cfg_attr(
6162            target_arch = "aarch64",
6163            link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32"
6164        )]
6165        fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
6166    }
6167    unsafe { _svcmplt_wide_s32(pg.sve_into(), op1, op2).sve_into() }
6168}
6169#[doc = "Compare less than"]
6170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"]
6171#[inline(always)]
6172#[target_feature(enable = "sve")]
6173#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6174#[cfg_attr(test, assert_instr(cmplt))]
6175pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
6176    svcmplt_wide_s32(pg, op1, svdup_n_s64(op2))
6177}
6178#[doc = "Compare less than"]
6179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"]
6180#[inline(always)]
6181#[target_feature(enable = "sve")]
6182#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6183#[cfg_attr(test, assert_instr(cmplo))]
6184pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
6185    unsafe extern "unadjusted" {
6186        #[cfg_attr(
6187            target_arch = "aarch64",
6188            link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8"
6189        )]
6190        fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
6191    }
6192    unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
6193}
6194#[doc = "Compare less than"]
6195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"]
6196#[inline(always)]
6197#[target_feature(enable = "sve")]
6198#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
6199#[cfg_attr(test, assert_instr(cmplo))]
6200pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
6201    svcmplt_wide_u8(pg, op1, svdup_n_u64(op2))
6202}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmplo))]
pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16"
        )]
        fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. `sve_into` reinterprets the generic predicate
    // to/from the 16-bit-lane form the intrinsic's signature uses, and
    // `as_signed` reinterprets the unsigned lane types as the same-width
    // signed types the intrinsic declares.
    unsafe { _svcmplt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmplo))]
pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmplt_wide_u16(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmplo))]
pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32"
        )]
        fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. `sve_into` converts the predicate to/from the
    // 32-bit-lane form, and `as_signed` reinterprets unsigned lanes as
    // the same-width signed types the intrinsic declares.
    unsafe { _svcmplt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "Compare less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmplo))]
pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmplt_wide_u32(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")]
        fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 32-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")]
        fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 64-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmne))]
pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")]
        fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. For 8-bit lanes the generic predicate already
    // matches the intrinsic's signature, so no conversion is needed.
    unsafe { _svcmpne_s8(pg, op1, op2) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_s8(pg, op1, svdup_n_s8(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")]
        fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 16-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_s16(pg, op1, svdup_n_s16(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")]
        fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 32-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_s32(pg, op1, svdup_n_s32(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")]
        fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 64-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_s64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_s64(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
    // SAFETY: `as_signed` reinterprets the unsigned lanes as the
    // same-width signed lane type; equality comparison is shared with
    // the signed variant.
    unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_u8(pg, op1, svdup_n_u8(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
    // SAFETY: `as_signed` reinterprets the unsigned lanes as the
    // same-width signed lane type; equality comparison is shared with
    // the signed variant.
    unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_u16(pg, op1, svdup_n_u16(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
    // SAFETY: `as_signed` reinterprets the unsigned lanes as the
    // same-width signed lane type; equality comparison is shared with
    // the signed variant.
    unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_u32(pg, op1, svdup_n_u32(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
    // SAFETY: `as_signed` reinterprets the unsigned lanes as the
    // same-width signed lane type; equality comparison is shared with
    // the signed variant.
    unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpne_u64(pg, op1, svdup_n_u64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8"
        )]
        fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. For 8-bit lanes the generic predicate already
    // matches the intrinsic's signature, so no conversion is needed.
    unsafe { _svcmpne_wide_s8(pg, op1, op2) }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` into a 64-bit vector and defer to
    // the vector variant.
    svcmpne_wide_s8(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16"
        )]
        fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 16-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_wide_s16(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` into a 64-bit vector and defer to
    // the vector variant.
    svcmpne_wide_s16(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32"
        )]
        fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 32-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpne_wide_s32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare not equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cmpne))]
pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` into a 64-bit vector and defer to
    // the vector variant.
    svcmpne_wide_s32(pg, op1, svdup_n_s64(op2))
}
#[doc = "Compare unordered with"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmuo))]
pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")]
        fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 32-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpuo_f32(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare unordered with"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmuo))]
pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpuo_f32(pg, op1, svdup_n_f32(op2))
}
#[doc = "Compare unordered with"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmuo))]
pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")]
        fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to/from the
    // 64-bit-lane form the intrinsic's signature uses.
    unsafe { _svcmpuo_f64(pg.sve_into(), op1, op2).sve_into() }
}
#[doc = "Compare unordered with"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcmuo))]
pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
    // Scalar ("_n_") form: splat `op2` and defer to the vector variant.
    svcmpuo_f64(pg, op1, svdup_n_f64(op2))
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")]
        fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. For 8-bit lanes the generic predicate already
    // matches the intrinsic's signature, so no conversion is needed.
    unsafe { _svcnot_s8_m(inactive, pg, op) }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_s8_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")]
        fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to the
    // 16-bit-lane form the intrinsic's signature uses.
    unsafe { _svcnot_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_s16_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")]
        fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to the
    // 32-bit-lane form the intrinsic's signature uses.
    unsafe { _svcnot_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_s32_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")]
        fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available; `sve_into` converts the predicate to the
    // 64-bit-lane form the intrinsic's signature uses.
    unsafe { _svcnot_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_s64_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the lane types
    // between same-width signed/unsigned forms around the shared signed
    // implementation.
    unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_u8_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the lane types
    // between same-width signed/unsigned forms around the shared signed
    // implementation.
    unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_u16_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the lane types
    // between same-width signed/unsigned forms around the shared signed
    // implementation.
    unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_u32_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the lane types
    // between same-width signed/unsigned forms around the shared signed
    // implementation.
    unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // "_x" (don't-care) variant: `op` itself supplies the inactive lanes.
    svcnot_u64_m(op, pg, op)
}
#[doc = "Logically invert boolean condition"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnot))]
pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnot_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")]
        fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. `sve_into` converts the predicate to the
    // 32-bit-lane form, and `as_signed`/`as_unsigned` only reinterpret
    // the count vector between same-width signed/unsigned lane types.
    unsafe { _svcnt_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
    // SAFETY: "_x" leaves inactive lanes unspecified, so the raw bits of
    // `op` (reinterpreted as u32 lanes via `transmute_unchecked`, same
    // lane count and width) are an acceptable `inactive` placeholder.
    unsafe { svcnt_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnt_f32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")]
        fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
    }
    // SAFETY: the `sve` target feature (declared above) makes the LLVM
    // intrinsic available. `sve_into` converts the predicate to the
    // 64-bit-lane form, and `as_signed`/`as_unsigned` only reinterpret
    // the count vector between same-width signed/unsigned lane types.
    unsafe { _svcnt_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    // SAFETY: "_x" leaves inactive lanes unspecified, so the raw bits of
    // `op` (reinterpreted as u64 lanes via `transmute_unchecked`, same
    // lane count and width) are an acceptable `inactive` placeholder.
    unsafe { svcnt_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    // "_z" (zeroing) variant: inactive lanes come from a zero vector.
    svcnt_f64_m(svdup_n_u64(0), pg, op)
}
// `svcnt` on signed integer vectors: per-element nonzero-bit count, returning
// an unsigned vector of the same lane width. `_m`/`_x`/`_z` predication forms
// follow the ACLE convention (merge / don't-care / zero inactive lanes).
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        // For 8-bit lanes (nxv16i8) the 16-lane predicate is `svbool_t`
        // itself, so `pg` is passed through without conversion.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")]
        fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // Don't-care form: the operand's own bits serve as the merge source.
    unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_s8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        // 16-bit lanes use the 8-lane predicate type (svbool8_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")]
        fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svcnt_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // Don't-care form: the operand's own bits serve as the merge source.
    unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_s16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        // 32-bit lanes use the 4-lane predicate type (svbool4_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")]
        fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svcnt_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // Don't-care form: the operand's own bits serve as the merge source.
    unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_s32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        // 64-bit lanes use the 2-lane predicate type (svbool2_t).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")]
        fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svcnt_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // Don't-care form: the operand's own bits serve as the merge source.
    unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_s64_m(svdup_n_u64(0), pg, op)
}
// `svcnt` on unsigned integer vectors: thin wrappers that reinterpret the
// operand as the signed type and delegate to the corresponding signed
// implementation (the signed variants already return unsigned vectors, so no
// conversion is needed on the result).
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
    unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Don't-care form: reuse the operand as the merge source.
    svcnt_u8_m(op, pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Don't-care form: reuse the operand as the merge source.
    svcnt_u16_m(op, pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Don't-care form: reuse the operand as the merge source.
    svcnt_u32_m(op, pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) }
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Don't-care form: reuse the operand as the merge source.
    svcnt_u64_m(op, pg, op)
}
#[doc = "Count nonzero bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnt))]
pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Zeroing form: all-zero merge source clears inactive lanes.
    svcnt_u64_m(svdup_n_u64(0), pg, op)
}
// Element-count queries: each returns the number of elements of the given
// width in a full SVE vector, by delegating to the `_pat` variant with the
// SV_ALL pattern (all available elements).
#[doc = "Count the number of 8-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rdvl))]
pub fn svcntb() -> u64 {
    // The SV_ALL byte count is expected to lower to RDVL (see assert_instr).
    svcntb_pat::<{ svpattern::SV_ALL }>()
}
#[doc = "Count the number of 16-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnth))]
pub fn svcnth() -> u64 {
    svcnth_pat::<{ svpattern::SV_ALL }>()
}
#[doc = "Count the number of 32-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntw))]
pub fn svcntw() -> u64 {
    svcntw_pat::<{ svpattern::SV_ALL }>()
}
#[doc = "Count the number of 64-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntd))]
pub fn svcntd() -> u64 {
    svcntd_pat::<{ svpattern::SV_ALL }>()
}
// Pattern-qualified element counts: `PATTERN` is a const generic passed
// straight to the LLVM intrinsic, which returns a signed count that is
// bit-reinterpreted to u64. (The oddly spaced `# [cfg_attr ...]` lines are
// the generator's token-stream rendering — do not reformat.)
#[doc = "Count the number of 8-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (rdvl , PATTERN = { svpattern :: SV_ALL }))]
# [cfg_attr (test , assert_instr (cntb , PATTERN = { svpattern :: SV_MUL4 }))]
pub fn svcntb_pat<const PATTERN: svpattern>() -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")]
        fn _svcntb_pat(pattern: svpattern) -> i64;
    }
    unsafe { _svcntb_pat(PATTERN).as_unsigned() }
}
#[doc = "Count the number of 16-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (cnth , PATTERN = { svpattern :: SV_ALL }))]
pub fn svcnth_pat<const PATTERN: svpattern>() -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")]
        fn _svcnth_pat(pattern: svpattern) -> i64;
    }
    unsafe { _svcnth_pat(PATTERN).as_unsigned() }
}
#[doc = "Count the number of 32-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (cntw , PATTERN = { svpattern :: SV_ALL }))]
pub fn svcntw_pat<const PATTERN: svpattern>() -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")]
        fn _svcntw_pat(pattern: svpattern) -> i64;
    }
    unsafe { _svcntw_pat(PATTERN).as_unsigned() }
}
#[doc = "Count the number of 64-bit elements in a vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (cntd , PATTERN = { svpattern :: SV_ALL }))]
pub fn svcntd_pat<const PATTERN: svpattern>() -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")]
        fn _svcntd_pat(pattern: svpattern) -> i64;
    }
    unsafe { _svcntd_pat(PATTERN).as_unsigned() }
}
// `svcntp_b*`: count set predicate bits under a governing predicate `pg`.
// Each element width maps to a distinct LLVM intrinsic (nxv16i1 down to
// nxv2i1); narrower widths convert both predicates via `sve_into`.
#[doc = "Count set predicate bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntp))]
pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        // 16-lane predicate is `svbool_t` itself, so no conversion needed.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")]
        fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64;
    }
    unsafe { _svcntp_b8(pg, op).as_unsigned() }
}
#[doc = "Count set predicate bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntp))]
pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")]
        fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64;
    }
    unsafe { _svcntp_b16(pg.sve_into(), op.sve_into()).as_unsigned() }
}
#[doc = "Count set predicate bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntp))]
pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")]
        fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64;
    }
    unsafe { _svcntp_b32(pg.sve_into(), op.sve_into()).as_unsigned() }
}
#[doc = "Count set predicate bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntp))]
pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")]
        fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64;
    }
    unsafe { _svcntp_b64(pg.sve_into(), op.sve_into()).as_unsigned() }
}
// `svcompact`: gathers the active (pg-true) elements to the low end of the
// vector and zero-fills the rest. Only 32- and 64-bit element types exist
// (the COMPACT instruction has no 8/16-bit forms); unsigned variants
// reinterpret through the signed implementations.
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.compact.nxv4f32"
        )]
        fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svcompact_f32(pg.sve_into(), op) }
}
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.compact.nxv2f64"
        )]
        fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svcompact_f64(pg.sve_into(), op) }
}
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.compact.nxv4i32"
        )]
        fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svcompact_s32(pg.sve_into(), op) }
}
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.compact.nxv2i64"
        )]
        fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svcompact_s64(pg.sve_into(), op) }
}
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Bit-reinterpret u32 -> s32, compact, then reinterpret back.
    unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Shuffle active elements of vector to the right and fill with zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(compact))]
pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Bit-reinterpret u64 -> s64, compact, then reinterpret back.
    unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() }
}
// `svcreate2_*`: pack two same-typed scalable vectors into a x2 tuple type.
// All variants delegate to the compiler intrinsic `sve_tuple_create2`; only
// the element type differs. No instruction assertion: tuple creation is a
// register-assignment-level operation.
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
#[doc = "Create a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) }
}
// `svcreate3_*`: pack three same-typed scalable vectors into a x3 tuple type
// via the compiler intrinsic `sve_tuple_create3`; variants differ only in
// element type.
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
#[doc = "Create a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
}
7477#[doc = "Create a tuple of three vectors"]
7478#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"]
7479#[inline(always)]
7480#[target_feature(enable = "sve")]
7481#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
7482pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t {
7483    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) }
7484}
// Four-element tuple constructors: as with `svcreate3_*`, each wrapper packs
// four same-typed SVE vectors into a `<type>x4` tuple via the pure
// `sve_tuple_create4` compiler intrinsic. No memory access or computation.
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_f32(
    x0: svfloat32_t,
    x1: svfloat32_t,
    x2: svfloat32_t,
    x3: svfloat32_t,
) -> svfloat32x4_t {
    // SAFETY: operands are plain SVE vector values; the tuple-create intrinsic
    // imposes no preconditions beyond valid inputs of the declared types.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_f64(
    x0: svfloat64_t,
    x1: svfloat64_t,
    x2: svfloat64_t,
    x3: svfloat64_t,
) -> svfloat64x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_u16(
    x0: svuint16_t,
    x1: svuint16_t,
    x2: svuint16_t,
    x3: svuint16_t,
) -> svuint16x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_u32(
    x0: svuint32_t,
    x1: svuint32_t,
    x2: svuint32_t,
    x3: svuint32_t,
) -> svuint32x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
#[doc = "Create a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svcreate4_u64(
    x0: svuint64_t,
    x1: svuint64_t,
    x2: svuint64_t,
    x3: svuint64_t,
) -> svuint64x4_t {
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) }
}
// Predicated float<->float conversions (FCVT). Per the ACLE naming
// convention each conversion has three predication forms:
//   _m (merge):  inactive lanes come from the `inactive` argument;
//   _x (any):    inactive lanes are unspecified — implemented by reusing
//                `op`'s register bits (reinterpreted) as the merge input;
//   _z (zero):   inactive lanes are zero — implemented as _m with a
//                zero-splat inactive vector.
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // The LLVM intrinsic takes the predicate at the granularity of the
        // wider (64-bit) element type, hence `svbool2_t` rather than `svbool_t`.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")]
        fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
    }
    // SAFETY: direct FFI call to the matching SVE intrinsic; `sve_into`
    // converts the byte-granular `svbool_t` to the layout the intrinsic expects.
    unsafe { _svcvt_f32_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // SAFETY: `transmute_unchecked` reinterprets `op`'s (same-size SVE
    // register) bits as the inactive-lane input; the _x form leaves inactive
    // lanes unspecified, so any bit pattern is acceptable there.
    unsafe { svcvt_f32_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
    // Zeroing form: merge against a zero splat so inactive lanes read 0.0.
    svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")]
        fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t;
    }
    unsafe { _svcvt_f64_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    unsafe { svcvt_f64_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvt))]
pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
    svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op)
}
// Predicated integer -> f32 conversions: SCVTF (signed) / UCVTF (unsigned),
// each in _m / _x / _z predication forms (see the f32<->f64 group above for
// the form semantics). For unsigned sources the LLVM declaration is typed on
// the signed vector, so `op.as_signed()` bit-reinterprets the input; the
// instruction selected (ucvtf) still treats the bits as unsigned.
// The predicate parameter type tracks element width: `svbool4_t` when both
// element types are 32-bit, `svbool2_t` when a 64-bit element is involved.
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i32")]
        fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t;
    }
    // SAFETY: direct FFI call to the matching SVE intrinsic; `sve_into`
    // converts the predicate to the layout the intrinsic expects.
    unsafe { _svcvt_f32_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t {
    // SAFETY: same-size register reinterpretation of `op` supplies the
    // don't-care inactive-lane input for the _x form.
    unsafe { svcvt_f32_s32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t {
    svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")]
        fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t;
    }
    unsafe { _svcvt_f32_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t {
    unsafe { svcvt_f32_s64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t {
    svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Declared on the signed vector type; `as_signed()` below only
        // reinterprets bits — UCVTF still performs an unsigned conversion.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i32")]
        fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t;
    }
    unsafe { _svcvt_f32_u32_m(inactive, pg.sve_into(), op.as_signed()) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t {
    unsafe { svcvt_f32_u32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t {
    svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")]
        fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t;
    }
    unsafe { _svcvt_f32_u64_m(inactive, pg.sve_into(), op.as_signed()) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t {
    unsafe { svcvt_f32_u64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t {
    svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op)
}
// Predicated integer -> f64 conversions: SCVTF (signed) / UCVTF (unsigned),
// in _m / _x / _z forms. Same pattern as the f32 group above; all of these
// involve a 64-bit element, so the LLVM predicate type is `svbool2_t`
// throughout, and unsigned sources are bit-reinterpreted via `as_signed()`.
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i32")]
        fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t;
    }
    // SAFETY: direct FFI call to the matching SVE intrinsic; `sve_into`
    // converts the predicate to the layout the intrinsic expects.
    unsafe { _svcvt_f64_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t {
    // SAFETY: same-size register reinterpretation of `op` supplies the
    // don't-care inactive-lane input for the _x form.
    unsafe { svcvt_f64_s32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t {
    svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i64")]
        fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t;
    }
    unsafe { _svcvt_f64_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t {
    unsafe { svcvt_f64_s64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(scvtf))]
pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t {
    svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // Declared on the signed vector type; `as_signed()` below only
        // reinterprets bits — UCVTF still performs an unsigned conversion.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i32")]
        fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t;
    }
    unsafe { _svcvt_f64_u32_m(inactive, pg.sve_into(), op.as_signed()) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t {
    unsafe { svcvt_f64_u32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t {
    svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i64")]
        fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t;
    }
    unsafe { _svcvt_f64_u64_m(inactive, pg.sve_into(), op.as_signed()) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t {
    unsafe { svcvt_f64_u64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ucvtf))]
pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t {
    svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op)
}
// Predicated float -> signed-integer conversions (FCVTZS, round toward
// zero), in _m / _x / _z forms. Zeroing forms merge against `svdup_n_s32(0)`
// / `svdup_n_s64(0)`; the predicate parameter type again tracks the wider
// element width (`svbool4_t` for 32/32, `svbool2_t` when 64-bit is involved).
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")]
        fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
    }
    // SAFETY: direct FFI call to the matching SVE intrinsic; `sve_into`
    // converts the predicate to the layout the intrinsic expects.
    unsafe { _svcvt_s32_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t {
    // SAFETY: same-size register reinterpretation of `op` supplies the
    // don't-care inactive-lane input for the _x form.
    unsafe { svcvt_s32_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t {
    svcvt_s32_f32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")]
        fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t;
    }
    unsafe { _svcvt_s32_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t {
    unsafe { svcvt_s32_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t {
    svcvt_s32_f64_m(svdup_n_s32(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")]
        fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t;
    }
    unsafe { _svcvt_s64_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t {
    unsafe { svcvt_s64_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t {
    svcvt_s64_f32_m(svdup_n_s64(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")]
        fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
    }
    unsafe { _svcvt_s64_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t {
    unsafe { svcvt_s64_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzs))]
pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t {
    svcvt_s64_f64_m(svdup_n_s64(0), pg, op)
}
8024#[doc = "Floating-point convert"]
8025#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"]
8026#[inline(always)]
8027#[target_feature(enable = "sve")]
8028#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8029#[cfg_attr(test, assert_instr(fcvtzu))]
8030pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t {
8031    unsafe extern "unadjusted" {
8032        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")]
8033        fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
8034    }
8035    unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
8036}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
    // "Don't care" (`_x`) form: delegate to the merging variant, reinterpreting
    // `op`'s bits as the integer result type to supply the `inactive` argument.
    unsafe { svcvt_u32_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
    // Zeroing (`_z`) form: delegate to the merging variant with an all-zero
    // vector as the `inactive` input.
    svcvt_u32_f32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")]
        fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic. The intrinsic
    // is typed over signed lanes, so the unsigned operand/result are
    // reinterpreted with `as_signed`/`as_unsigned`; `sve_into` converts the
    // generic predicate to the 2-lane predicate type (f64 source lanes).
    unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t {
    // "Don't care" (`_x`) form: delegate to the merging variant, reinterpreting
    // `op`'s bits as the integer result type to supply the `inactive` argument.
    unsafe { svcvt_u32_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t {
    // Zeroing (`_z`) form: delegate to the merging variant with an all-zero
    // vector as the `inactive` input.
    svcvt_u32_f64_m(svdup_n_u32(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")]
        fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic. The intrinsic
    // is typed over signed lanes, so the unsigned operand/result are
    // reinterpreted with `as_signed`/`as_unsigned`; `sve_into` converts the
    // generic predicate to the 2-lane predicate type (i64 result lanes).
    unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // "Don't care" (`_x`) form: delegate to the merging variant, reinterpreting
    // `op`'s bits as the integer result type to supply the `inactive` argument.
    unsafe { svcvt_u64_f32_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
    // Zeroing (`_z`) form: delegate to the merging variant with an all-zero
    // vector as the `inactive` input.
    svcvt_u64_f32_m(svdup_n_u64(0), pg, op)
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")]
        fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic. The intrinsic
    // is typed over signed lanes, so the unsigned operand/result are
    // reinterpreted with `as_signed`/`as_unsigned`; `sve_into` converts the
    // generic predicate to the 2-lane predicate type it expects.
    unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    // "Don't care" (`_x`) form: delegate to the merging variant, reinterpreting
    // `op`'s bits as the integer result type to supply the `inactive` argument.
    unsafe { svcvt_u64_f64_m(transmute_unchecked(op), pg, op) }
}
#[doc = "Floating-point convert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fcvtzu))]
pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
    // Zeroing (`_z`) form: delegate to the merging variant with an all-zero
    // vector as the `inactive` input.
    svcvt_u64_f64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")]
        fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 4-lane predicate type it expects.
    unsafe { _svdiv_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_f32_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")]
        fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 2-lane predicate type it expects.
    unsafe { _svdiv_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_f64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdiv))]
pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")]
        fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 4-lane predicate type it expects.
    unsafe { _svdiv_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_s32_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")]
        fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 2-lane predicate type it expects.
    unsafe { _svdiv_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_s64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdiv))]
pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")]
        fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic. The intrinsic
    // is typed over signed lanes, so operands/result are reinterpreted with
    // `as_signed`/`as_unsigned`; `sve_into` converts the generic predicate to
    // the 4-lane predicate type it expects.
    unsafe { _svdiv_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_u32_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")]
        fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic. The intrinsic
    // is typed over signed lanes, so operands/result are reinterpreted with
    // `as_signed`/`as_unsigned`; `sve_into` converts the generic predicate to
    // the 2-lane predicate type it expects.
    unsafe { _svdiv_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdiv_u64_m(pg, op1, op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging divide.
    svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Divide"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(udiv))]
pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdiv_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")]
        fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 4-lane predicate type it expects.
    unsafe { _svdivr_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdivr_f32_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging reversed divide.
    svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")]
        fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 2-lane predicate type it expects.
    unsafe { _svdivr_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdivr_f64_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging reversed divide.
    svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fdivr))]
pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Declaration of the underlying LLVM SVE intrinsic; the "unadjusted" ABI
    // passes scalable vectors through without Rust's usual ABI adjustments.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")]
        fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration above matches the LLVM intrinsic; `sve_into`
    // converts the generic predicate to the 4-lane predicate type it expects.
    unsafe { _svdivr_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (`_x`) form: reuse the merging variant unchanged.
    svdivr_s32_m(pg, op1, op2)
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar (`_n_`) form: broadcast `op2` and delegate to the vector variant.
    svdivr_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Divide reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sdivr))]
pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (`_z`) form: zero the predicated-off lanes of `op1` via `svsel`
    // before performing the merging reversed divide.
    svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
8661#[doc = "Divide reversed"]
8662#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"]
8663#[inline(always)]
8664#[target_feature(enable = "sve")]
8665#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8666#[cfg_attr(test, assert_instr(sdivr))]
8667pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
8668    svdivr_s32_z(pg, op1, svdup_n_s32(op2))
8669}
8670#[doc = "Divide reversed"]
8671#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"]
8672#[inline(always)]
8673#[target_feature(enable = "sve")]
8674#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8675#[cfg_attr(test, assert_instr(sdivr))]
8676pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
8677    unsafe extern "unadjusted" {
8678        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")]
8679        fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
8680    }
8681    unsafe { _svdivr_s64_m(pg.sve_into(), op1, op2) }
8682}
8683#[doc = "Divide reversed"]
8684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"]
8685#[inline(always)]
8686#[target_feature(enable = "sve")]
8687#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8688#[cfg_attr(test, assert_instr(sdivr))]
8689pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
8690    svdivr_s64_m(pg, op1, svdup_n_s64(op2))
8691}
8692#[doc = "Divide reversed"]
8693#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"]
8694#[inline(always)]
8695#[target_feature(enable = "sve")]
8696#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8697#[cfg_attr(test, assert_instr(sdivr))]
8698pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
8699    svdivr_s64_m(pg, op1, op2)
8700}
8701#[doc = "Divide reversed"]
8702#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"]
8703#[inline(always)]
8704#[target_feature(enable = "sve")]
8705#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8706#[cfg_attr(test, assert_instr(sdivr))]
8707pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
8708    svdivr_s64_x(pg, op1, svdup_n_s64(op2))
8709}
8710#[doc = "Divide reversed"]
8711#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"]
8712#[inline(always)]
8713#[target_feature(enable = "sve")]
8714#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8715#[cfg_attr(test, assert_instr(sdivr))]
8716pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
8717    svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
8718}
8719#[doc = "Divide reversed"]
8720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"]
8721#[inline(always)]
8722#[target_feature(enable = "sve")]
8723#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8724#[cfg_attr(test, assert_instr(sdivr))]
8725pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
8726    svdivr_s64_z(pg, op1, svdup_n_s64(op2))
8727}
8728#[doc = "Divide reversed"]
8729#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"]
8730#[inline(always)]
8731#[target_feature(enable = "sve")]
8732#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8733#[cfg_attr(test, assert_instr(udivr))]
8734pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
8735    unsafe extern "unadjusted" {
8736        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")]
8737        fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
8738    }
8739    unsafe { _svdivr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
8740}
8741#[doc = "Divide reversed"]
8742#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"]
8743#[inline(always)]
8744#[target_feature(enable = "sve")]
8745#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8746#[cfg_attr(test, assert_instr(udivr))]
8747pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
8748    svdivr_u32_m(pg, op1, svdup_n_u32(op2))
8749}
8750#[doc = "Divide reversed"]
8751#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"]
8752#[inline(always)]
8753#[target_feature(enable = "sve")]
8754#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8755#[cfg_attr(test, assert_instr(udivr))]
8756pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
8757    svdivr_u32_m(pg, op1, op2)
8758}
8759#[doc = "Divide reversed"]
8760#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"]
8761#[inline(always)]
8762#[target_feature(enable = "sve")]
8763#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8764#[cfg_attr(test, assert_instr(udivr))]
8765pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
8766    svdivr_u32_x(pg, op1, svdup_n_u32(op2))
8767}
8768#[doc = "Divide reversed"]
8769#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"]
8770#[inline(always)]
8771#[target_feature(enable = "sve")]
8772#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8773#[cfg_attr(test, assert_instr(udivr))]
8774pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
8775    svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
8776}
8777#[doc = "Divide reversed"]
8778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"]
8779#[inline(always)]
8780#[target_feature(enable = "sve")]
8781#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8782#[cfg_attr(test, assert_instr(udivr))]
8783pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
8784    svdivr_u32_z(pg, op1, svdup_n_u32(op2))
8785}
8786#[doc = "Divide reversed"]
8787#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"]
8788#[inline(always)]
8789#[target_feature(enable = "sve")]
8790#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8791#[cfg_attr(test, assert_instr(udivr))]
8792pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
8793    unsafe extern "unadjusted" {
8794        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")]
8795        fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
8796    }
8797    unsafe { _svdivr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
8798}
8799#[doc = "Divide reversed"]
8800#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"]
8801#[inline(always)]
8802#[target_feature(enable = "sve")]
8803#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8804#[cfg_attr(test, assert_instr(udivr))]
8805pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
8806    svdivr_u64_m(pg, op1, svdup_n_u64(op2))
8807}
8808#[doc = "Divide reversed"]
8809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"]
8810#[inline(always)]
8811#[target_feature(enable = "sve")]
8812#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8813#[cfg_attr(test, assert_instr(udivr))]
8814pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
8815    svdivr_u64_m(pg, op1, op2)
8816}
8817#[doc = "Divide reversed"]
8818#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"]
8819#[inline(always)]
8820#[target_feature(enable = "sve")]
8821#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8822#[cfg_attr(test, assert_instr(udivr))]
8823pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
8824    svdivr_u64_x(pg, op1, svdup_n_u64(op2))
8825}
8826#[doc = "Divide reversed"]
8827#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"]
8828#[inline(always)]
8829#[target_feature(enable = "sve")]
8830#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8831#[cfg_attr(test, assert_instr(udivr))]
8832pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
8833    svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
8834}
8835#[doc = "Divide reversed"]
8836#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"]
8837#[inline(always)]
8838#[target_feature(enable = "sve")]
8839#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8840#[cfg_attr(test, assert_instr(udivr))]
8841pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
8842    svdivr_u64_z(pg, op1, svdup_n_u64(op2))
8843}
8844#[doc = "Dot product"]
8845#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"]
8846#[inline(always)]
8847#[target_feature(enable = "sve")]
8848#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8849#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
8850pub fn svdot_lane_s32<const IMM_INDEX: i32>(
8851    op1: svint32_t,
8852    op2: svint8_t,
8853    op3: svint8_t,
8854) -> svint32_t {
8855    static_assert_range!(IMM_INDEX, 0..=3);
8856    unsafe extern "unadjusted" {
8857        #[cfg_attr(
8858            target_arch = "aarch64",
8859            link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32"
8860        )]
8861        fn _svdot_lane_s32(
8862            op1: svint32_t,
8863            op2: svint8_t,
8864            op3: svint8_t,
8865            imm_index: i32,
8866        ) -> svint32_t;
8867    }
8868    unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) }
8869}
8870#[doc = "Dot product"]
8871#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"]
8872#[inline(always)]
8873#[target_feature(enable = "sve")]
8874#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8875#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
8876pub fn svdot_lane_s64<const IMM_INDEX: i32>(
8877    op1: svint64_t,
8878    op2: svint16_t,
8879    op3: svint16_t,
8880) -> svint64_t {
8881    static_assert_range!(IMM_INDEX, 0..=1);
8882    unsafe extern "unadjusted" {
8883        #[cfg_attr(
8884            target_arch = "aarch64",
8885            link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64"
8886        )]
8887        fn _svdot_lane_s64(
8888            op1: svint64_t,
8889            op2: svint16_t,
8890            op3: svint16_t,
8891            imm_index: i32,
8892        ) -> svint64_t;
8893    }
8894    unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) }
8895}
8896#[doc = "Dot product"]
8897#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"]
8898#[inline(always)]
8899#[target_feature(enable = "sve")]
8900#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8901#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
8902pub fn svdot_lane_u32<const IMM_INDEX: i32>(
8903    op1: svuint32_t,
8904    op2: svuint8_t,
8905    op3: svuint8_t,
8906) -> svuint32_t {
8907    static_assert_range!(IMM_INDEX, 0..=3);
8908    unsafe extern "unadjusted" {
8909        #[cfg_attr(
8910            target_arch = "aarch64",
8911            link_name = "llvm.aarch64.sve.udot.lane.nxv4i32"
8912        )]
8913        fn _svdot_lane_u32(
8914            op1: svint32_t,
8915            op2: svint8_t,
8916            op3: svint8_t,
8917            imm_index: i32,
8918        ) -> svint32_t;
8919    }
8920    unsafe {
8921        _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
8922    }
8923}
8924#[doc = "Dot product"]
8925#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"]
8926#[inline(always)]
8927#[target_feature(enable = "sve")]
8928#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8929#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
8930pub fn svdot_lane_u64<const IMM_INDEX: i32>(
8931    op1: svuint64_t,
8932    op2: svuint16_t,
8933    op3: svuint16_t,
8934) -> svuint64_t {
8935    static_assert_range!(IMM_INDEX, 0..=1);
8936    unsafe extern "unadjusted" {
8937        #[cfg_attr(
8938            target_arch = "aarch64",
8939            link_name = "llvm.aarch64.sve.udot.lane.nxv2i64"
8940        )]
8941        fn _svdot_lane_u64(
8942            op1: svint64_t,
8943            op2: svint16_t,
8944            op3: svint16_t,
8945            imm_index: i32,
8946        ) -> svint64_t;
8947    }
8948    unsafe {
8949        _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
8950    }
8951}
8952#[doc = "Dot product"]
8953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"]
8954#[inline(always)]
8955#[target_feature(enable = "sve")]
8956#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8957#[cfg_attr(test, assert_instr(sdot))]
8958pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
8959    unsafe extern "unadjusted" {
8960        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")]
8961        fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
8962    }
8963    unsafe { _svdot_s32(op1, op2, op3) }
8964}
8965#[doc = "Dot product"]
8966#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"]
8967#[inline(always)]
8968#[target_feature(enable = "sve")]
8969#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8970#[cfg_attr(test, assert_instr(sdot))]
8971pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t {
8972    svdot_s32(op1, op2, svdup_n_s8(op3))
8973}
8974#[doc = "Dot product"]
8975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"]
8976#[inline(always)]
8977#[target_feature(enable = "sve")]
8978#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8979#[cfg_attr(test, assert_instr(sdot))]
8980pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t {
8981    unsafe extern "unadjusted" {
8982        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")]
8983        fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t;
8984    }
8985    unsafe { _svdot_s64(op1, op2, op3) }
8986}
8987#[doc = "Dot product"]
8988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"]
8989#[inline(always)]
8990#[target_feature(enable = "sve")]
8991#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
8992#[cfg_attr(test, assert_instr(sdot))]
8993pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t {
8994    svdot_s64(op1, op2, svdup_n_s16(op3))
8995}
8996#[doc = "Dot product"]
8997#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"]
8998#[inline(always)]
8999#[target_feature(enable = "sve")]
9000#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9001#[cfg_attr(test, assert_instr(udot))]
9002pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
9003    unsafe extern "unadjusted" {
9004        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")]
9005        fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
9006    }
9007    unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
9008}
9009#[doc = "Dot product"]
9010#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"]
9011#[inline(always)]
9012#[target_feature(enable = "sve")]
9013#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9014#[cfg_attr(test, assert_instr(udot))]
9015pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t {
9016    svdot_u32(op1, op2, svdup_n_u8(op3))
9017}
9018#[doc = "Dot product"]
9019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"]
9020#[inline(always)]
9021#[target_feature(enable = "sve")]
9022#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9023#[cfg_attr(test, assert_instr(udot))]
9024pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t {
9025    unsafe extern "unadjusted" {
9026        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")]
9027        fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t;
9028    }
9029    unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
9030}
9031#[doc = "Dot product"]
9032#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"]
9033#[inline(always)]
9034#[target_feature(enable = "sve")]
9035#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9036#[cfg_attr(test, assert_instr(udot))]
9037pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t {
9038    svdot_u64(op1, op2, svdup_n_u16(op3))
9039}
9040#[doc = "Broadcast a scalar value"]
9041#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"]
9042#[inline(always)]
9043#[target_feature(enable = "sve")]
9044#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9045#[cfg_attr(test, assert_instr(tbl))]
9046pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t {
9047    svtbl_f32(data, svdup_n_u32(index))
9048}
9049#[doc = "Broadcast a scalar value"]
9050#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"]
9051#[inline(always)]
9052#[target_feature(enable = "sve")]
9053#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9054#[cfg_attr(test, assert_instr(tbl))]
9055pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t {
9056    svtbl_f64(data, svdup_n_u64(index))
9057}
9058#[doc = "Broadcast a scalar value"]
9059#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"]
9060#[inline(always)]
9061#[target_feature(enable = "sve")]
9062#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9063#[cfg_attr(test, assert_instr(tbl))]
9064pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t {
9065    svtbl_s8(data, svdup_n_u8(index))
9066}
9067#[doc = "Broadcast a scalar value"]
9068#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"]
9069#[inline(always)]
9070#[target_feature(enable = "sve")]
9071#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9072#[cfg_attr(test, assert_instr(tbl))]
9073pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t {
9074    svtbl_s16(data, svdup_n_u16(index))
9075}
9076#[doc = "Broadcast a scalar value"]
9077#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"]
9078#[inline(always)]
9079#[target_feature(enable = "sve")]
9080#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9081#[cfg_attr(test, assert_instr(tbl))]
9082pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t {
9083    svtbl_s32(data, svdup_n_u32(index))
9084}
9085#[doc = "Broadcast a scalar value"]
9086#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"]
9087#[inline(always)]
9088#[target_feature(enable = "sve")]
9089#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9090#[cfg_attr(test, assert_instr(tbl))]
9091pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t {
9092    svtbl_s64(data, svdup_n_u64(index))
9093}
9094#[doc = "Broadcast a scalar value"]
9095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"]
9096#[inline(always)]
9097#[target_feature(enable = "sve")]
9098#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9099#[cfg_attr(test, assert_instr(tbl))]
9100pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t {
9101    svtbl_u8(data, svdup_n_u8(index))
9102}
9103#[doc = "Broadcast a scalar value"]
9104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"]
9105#[inline(always)]
9106#[target_feature(enable = "sve")]
9107#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9108#[cfg_attr(test, assert_instr(tbl))]
9109pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t {
9110    svtbl_u16(data, svdup_n_u16(index))
9111}
9112#[doc = "Broadcast a scalar value"]
9113#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"]
9114#[inline(always)]
9115#[target_feature(enable = "sve")]
9116#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9117#[cfg_attr(test, assert_instr(tbl))]
9118pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t {
9119    svtbl_u32(data, svdup_n_u32(index))
9120}
9121#[doc = "Broadcast a scalar value"]
9122#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"]
9123#[inline(always)]
9124#[target_feature(enable = "sve")]
9125#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9126#[cfg_attr(test, assert_instr(tbl))]
9127pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t {
9128    svtbl_u64(data, svdup_n_u64(index))
9129}
9130#[doc = "Broadcast a scalar value"]
9131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"]
9132#[inline(always)]
9133#[target_feature(enable = "sve")]
9134#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9135#[cfg_attr(test, assert_instr(sbfx))]
9136#[cfg_attr(test, assert_instr(whilelo))]
9137pub fn svdup_n_b8(op: bool) -> svbool_t {
9138    unsafe extern "unadjusted" {
9139        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")]
9140        fn _svdup_n_b8(op: bool) -> svbool_t;
9141    }
9142    unsafe { _svdup_n_b8(op) }
9143}
9144#[doc = "Broadcast a scalar value"]
9145#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"]
9146#[inline(always)]
9147#[target_feature(enable = "sve")]
9148#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9149#[cfg_attr(test, assert_instr(sbfx))]
9150#[cfg_attr(test, assert_instr(whilelo))]
9151pub fn svdup_n_b16(op: bool) -> svbool_t {
9152    unsafe extern "unadjusted" {
9153        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")]
9154        fn _svdup_n_b16(op: bool) -> svbool8_t;
9155    }
9156    unsafe { _svdup_n_b16(op).sve_into() }
9157}
9158#[doc = "Broadcast a scalar value"]
9159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"]
9160#[inline(always)]
9161#[target_feature(enable = "sve")]
9162#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9163#[cfg_attr(test, assert_instr(sbfx))]
9164#[cfg_attr(test, assert_instr(whilelo))]
9165pub fn svdup_n_b32(op: bool) -> svbool_t {
9166    unsafe extern "unadjusted" {
9167        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")]
9168        fn _svdup_n_b32(op: bool) -> svbool4_t;
9169    }
9170    unsafe { _svdup_n_b32(op).sve_into() }
9171}
9172#[doc = "Broadcast a scalar value"]
9173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"]
9174#[inline(always)]
9175#[target_feature(enable = "sve")]
9176#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9177#[cfg_attr(test, assert_instr(sbfx))]
9178#[cfg_attr(test, assert_instr(whilelo))]
9179pub fn svdup_n_b64(op: bool) -> svbool_t {
9180    unsafe extern "unadjusted" {
9181        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")]
9182        fn _svdup_n_b64(op: bool) -> svbool2_t;
9183    }
9184    unsafe { _svdup_n_b64(op).sve_into() }
9185}
9186#[doc = "Broadcast a scalar value"]
9187#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"]
9188#[inline(always)]
9189#[target_feature(enable = "sve")]
9190#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9191#[cfg_attr(test, assert_instr(mov))]
9192pub fn svdup_n_f32(op: f32) -> svfloat32_t {
9193    unsafe extern "unadjusted" {
9194        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")]
9195        fn _svdup_n_f32(op: f32) -> svfloat32_t;
9196    }
9197    unsafe { _svdup_n_f32(op) }
9198}
9199#[doc = "Broadcast a scalar value"]
9200#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"]
9201#[inline(always)]
9202#[target_feature(enable = "sve")]
9203#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9204#[cfg_attr(test, assert_instr(mov))]
9205pub fn svdup_n_f64(op: f64) -> svfloat64_t {
9206    unsafe extern "unadjusted" {
9207        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")]
9208        fn _svdup_n_f64(op: f64) -> svfloat64_t;
9209    }
9210    unsafe { _svdup_n_f64(op) }
9211}
9212#[doc = "Broadcast a scalar value"]
9213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"]
9214#[inline(always)]
9215#[target_feature(enable = "sve")]
9216#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9217#[cfg_attr(test, assert_instr(mov))]
9218pub fn svdup_n_s8(op: i8) -> svint8_t {
9219    unsafe extern "unadjusted" {
9220        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")]
9221        fn _svdup_n_s8(op: i8) -> svint8_t;
9222    }
9223    unsafe { _svdup_n_s8(op) }
9224}
9225#[doc = "Broadcast a scalar value"]
9226#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"]
9227#[inline(always)]
9228#[target_feature(enable = "sve")]
9229#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9230#[cfg_attr(test, assert_instr(mov))]
9231pub fn svdup_n_s16(op: i16) -> svint16_t {
9232    unsafe extern "unadjusted" {
9233        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")]
9234        fn _svdup_n_s16(op: i16) -> svint16_t;
9235    }
9236    unsafe { _svdup_n_s16(op) }
9237}
9238#[doc = "Broadcast a scalar value"]
9239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"]
9240#[inline(always)]
9241#[target_feature(enable = "sve")]
9242#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9243#[cfg_attr(test, assert_instr(mov))]
9244pub fn svdup_n_s32(op: i32) -> svint32_t {
9245    unsafe extern "unadjusted" {
9246        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
9247        fn _svdup_n_s32(op: i32) -> svint32_t;
9248    }
9249    unsafe { _svdup_n_s32(op) }
9250}
9251#[doc = "Broadcast a scalar value"]
9252#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"]
9253#[inline(always)]
9254#[target_feature(enable = "sve")]
9255#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9256#[cfg_attr(test, assert_instr(mov))]
9257pub fn svdup_n_s64(op: i64) -> svint64_t {
9258    unsafe extern "unadjusted" {
9259        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")]
9260        fn _svdup_n_s64(op: i64) -> svint64_t;
9261    }
9262    unsafe { _svdup_n_s64(op) }
9263}
9264#[doc = "Broadcast a scalar value"]
9265#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"]
9266#[inline(always)]
9267#[target_feature(enable = "sve")]
9268#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9269#[cfg_attr(test, assert_instr(mov))]
9270pub fn svdup_n_u8(op: u8) -> svuint8_t {
9271    unsafe { svdup_n_s8(op.as_signed()).as_unsigned() }
9272}
9273#[doc = "Broadcast a scalar value"]
9274#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"]
9275#[inline(always)]
9276#[target_feature(enable = "sve")]
9277#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9278#[cfg_attr(test, assert_instr(mov))]
9279pub fn svdup_n_u16(op: u16) -> svuint16_t {
9280    unsafe { svdup_n_s16(op.as_signed()).as_unsigned() }
9281}
9282#[doc = "Broadcast a scalar value"]
9283#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"]
9284#[inline(always)]
9285#[target_feature(enable = "sve")]
9286#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9287#[cfg_attr(test, assert_instr(mov))]
9288pub fn svdup_n_u32(op: u32) -> svuint32_t {
9289    unsafe { svdup_n_s32(op.as_signed()).as_unsigned() }
9290}
9291#[doc = "Broadcast a scalar value"]
9292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"]
9293#[inline(always)]
9294#[target_feature(enable = "sve")]
9295#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
9296#[cfg_attr(test, assert_instr(mov))]
9297pub fn svdup_n_u64(op: u64) -> svuint64_t {
9298    unsafe { svdup_n_s64(op.as_signed()).as_unsigned() }
9299}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Merging form: lanes active in `pg` receive `op`; inactive lanes keep the
// corresponding lane of `inactive`. `pg.sve_into()` converts the all-lanes
// predicate to the 4-lane predicate type (svbool4_t) the LLVM intrinsic takes.
pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")]
        fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t;
    }
    unsafe { _svdup_n_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form: inactive lanes may hold any value, so delegating to the
// merging form with a zero vector is a valid implementation (it happens to be
// identical to the _z form in this generated code).
pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t {
    svdup_n_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes are set to 0.0 via the zero `inactive` vector.
pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t {
    svdup_n_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// f64 merging form; 2-lane predicate (svbool2_t) for the nxv2f64 intrinsic.
pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")]
        fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t;
    }
    unsafe { _svdup_n_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented as the zeroing form (valid: any inactive value is allowed).
pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t {
    svdup_n_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.0.
pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t {
    svdup_n_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Merging form for i8: active lanes take `op`, inactive lanes keep `inactive`.
// The 16-lane predicate is svbool_t itself, so no predicate conversion is needed.
pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")]
        fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t;
    }
    unsafe { _svdup_n_s8_m(inactive, pg, op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t {
    svdup_n_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t {
    svdup_n_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// i16 merging form; `sve_into` narrows the predicate to 8 lanes (svbool8_t).
pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")]
        fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t;
    }
    unsafe { _svdup_n_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t {
    svdup_n_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t {
    svdup_n_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// i32 merging form; 4-lane predicate (svbool4_t) for the nxv4i32 intrinsic.
pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")]
        fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t;
    }
    unsafe { _svdup_n_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t {
    svdup_n_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t {
    svdup_n_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// i64 merging form; 2-lane predicate (svbool2_t) for the nxv2i64 intrinsic.
pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")]
        fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t;
    }
    unsafe { _svdup_n_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t {
    svdup_n_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t {
    svdup_n_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Unsigned merging form: bit-cast vector and scalar to signed, delegate to the
// signed implementation, and bit-cast the result back (no value change).
pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t {
    unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t {
    svdup_n_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t {
    svdup_n_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Unsigned merging form, delegating to the signed i16 implementation.
pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t {
    unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t {
    svdup_n_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t {
    svdup_n_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Unsigned merging form, delegating to the signed i32 implementation.
pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t {
    unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t {
    svdup_n_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t {
    svdup_n_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Unsigned merging form, delegating to the signed i64 implementation.
pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t {
    unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// "Don't-care" form implemented via the merging form with a zero inactive vector.
pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t {
    svdup_n_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Broadcast a scalar value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Zeroing form: inactive lanes become 0.
pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t {
    svdup_n_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Replicates the `index`-th 128-bit quadword of `data` across the whole
// scalable vector (lowered to TBL). `index.as_signed()` reinterprets the
// unsigned lane index as the i64 the LLVM intrinsic expects.
pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32"
        )]
        fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t;
    }
    unsafe { _svdupq_lane_f32(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Quadword replication for f64 vectors (see the f32 variant above).
pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64"
        )]
        fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t;
    }
    unsafe { _svdupq_lane_f64(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Quadword replication for i8 vectors.
pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv16i8"
        )]
        fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t;
    }
    unsafe { _svdupq_lane_s8(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Quadword replication for i16 vectors.
pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16"
        )]
        fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t;
    }
    unsafe { _svdupq_lane_s16(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Quadword replication for i32 vectors.
pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32"
        )]
        fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t;
    }
    unsafe { _svdupq_lane_s32(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Quadword replication for i64 vectors.
pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64"
        )]
        fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t;
    }
    unsafe { _svdupq_lane_s64(data, index.as_signed()) }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Unsigned wrapper: bit-casts to signed, replicates, and bit-casts back.
pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t {
    unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Unsigned wrapper over the signed i16 replication.
pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t {
    unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Unsigned wrapper over the signed i32 replication.
pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t {
    unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
// Unsigned wrapper over the signed i64 replication.
pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t {
    unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Builds a predicate from 8 bools: materialize them as a repeating i16 vector
// of 0/1 values, then compare != 0 (widening compare against a zero i64
// vector) to turn nonzero lanes into active predicate lanes.
pub fn svdupq_n_b16(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
) -> svbool_t {
    let op1 = svdupq_n_s16(
        x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16,
    );
    svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0))
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Same bools -> 0/1 vector -> compare-!=-0 scheme at 32-bit granularity.
pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t {
    let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32);
    svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0))
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// 64-bit variant: the elements are already i64, so the plain (non-wide)
// compare is used.
pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t {
    let op1 = svdupq_n_s64(x0 as i64, x1 as i64);
    svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0))
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// 16-bool variant of the bools -> 0/1 vector -> compare-!=-0 scheme.
pub fn svdupq_n_b8(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
) -> svbool_t {
    let op1 = svdupq_n_s8(
        x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8,
        x9 as i8, x10 as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8,
    );
    svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0))
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Packs the four scalars into a fixed 128-bit NEON vector (the transmute of
// a [f32; 4] to float32x4_t), inserts it as quadword 0 of an undefined
// scalable vector, then replicates quadword 0 across the full vector.
pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32"
        )]
        fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t;
    }
    unsafe {
        let op = _svdupq_n_f32(svundef_f32(), crate::mem::transmute([x0, x1, x2, x3]), 0);
        svdupq_lane_f32(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Same insert-then-replicate scheme for four i32 scalars.
pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32"
        )]
        fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t;
    }
    unsafe {
        let op = _svdupq_n_s32(svundef_s32(), crate::mem::transmute([x0, x1, x2, x3]), 0);
        svdupq_lane_s32(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Unsigned wrapper: bit-casts the scalars to signed, delegates, casts back.
pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t {
    unsafe {
        svdupq_n_s32(
            x0.as_signed(),
            x1.as_signed(),
            x2.as_signed(),
            x3.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Insert-then-replicate scheme for a pair of f64 scalars.
pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64"
        )]
        fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t;
    }
    unsafe {
        let op = _svdupq_n_f64(svundef_f64(), crate::mem::transmute([x0, x1]), 0);
        svdupq_lane_f64(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Insert-then-replicate scheme for a pair of i64 scalars.
pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64"
        )]
        fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t;
    }
    unsafe {
        let op = _svdupq_n_s64(svundef_s64(), crate::mem::transmute([x0, x1]), 0);
        svdupq_lane_s64(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Unsigned wrapper over the signed i64 pair broadcast.
pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t {
    unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Packs eight i16 scalars into a fixed 128-bit NEON vector, inserts it as
// quadword 0 of an undefined scalable vector, then replicates quadword 0.
pub fn svdupq_n_s16(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16"
        )]
        fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t;
    }
    unsafe {
        let op = _svdupq_n_s16(
            svundef_s16(),
            crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]),
            0,
        );
        svdupq_lane_s16(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Unsigned wrapper: bit-casts each scalar to signed, delegates, casts back.
pub fn svdupq_n_u16(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
) -> svuint16_t {
    unsafe {
        svdupq_n_s16(
            x0.as_signed(),
            x1.as_signed(),
            x2.as_signed(),
            x3.as_signed(),
            x4.as_signed(),
            x5.as_signed(),
            x6.as_signed(),
            x7.as_signed(),
        )
        .as_unsigned()
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Sixteen-i8 variant of the insert-then-replicate quadword broadcast.
pub fn svdupq_n_s8(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8"
        )]
        fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t;
    }
    unsafe {
        let op = _svdupq_n_s8(
            svundef_s8(),
            crate::mem::transmute([
                x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
            ]),
            0,
        );
        svdupq_lane_s8(op, 0)
    }
}
#[doc = "Broadcast a quadword of scalars"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Unsigned wrapper over the signed sixteen-i8 broadcast.
pub fn svdupq_n_u8(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
) -> svuint8_t {
    unsafe {
        svdupq_n_s8(
            x0.as_signed(),
            x1.as_signed(),
            x2.as_signed(),
            x3.as_signed(),
            x4.as_signed(),
            x5.as_signed(),
            x6.as_signed(),
            x7.as_signed(),
            x8.as_signed(),
            x9.as_signed(),
            x10.as_signed(),
            x11.as_signed(),
            x12.as_signed(),
            x13.as_signed(),
            x14.as_signed(),
            x15.as_signed(),
        )
        .as_unsigned()
    }
}
10035#[doc = "Bitwise exclusive OR"]
10036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"]
10037#[inline(always)]
10038#[target_feature(enable = "sve")]
10039#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10040#[cfg_attr(test, assert_instr(eor))]
10041pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
10042    unsafe extern "unadjusted" {
10043        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nvx16i1")]
10044        fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
10045    }
10046    unsafe { _sveor_b_z(pg, op1, op2) }
10047}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Merging (`_m`) form: delegates straight to the LLVM EOR intrinsic.
// `svbool_t` is already the 16-lane predicate matching `nxv16i8`, so no
// predicate conversion (`sve_into`) is needed here, unlike the wider types.
pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv16i8")]
        fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveor_s8_m(pg, op1, op2) }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcasts `op2` and reuses the vector variant.
pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    sveor_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: implemented as the merging form, so inactive
// lanes happen to carry `op1` (any value is permitted for `_x`).
pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    sveor_s8_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    sveor_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: `svsel` forces the inactive lanes of `op1` to zero
// before the merge, so inactive result lanes come out as zero.
pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    sveor_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Merging (`_m`) form. The intrinsic takes an 8-lane predicate (`svbool8_t`)
// to match `nxv8i16`, so `pg` is narrowed with `sve_into`.
pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")]
        fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveor_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    sveor_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    sveor_s16_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    sveor_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    sveor_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Merging (`_m`) form. The intrinsic takes a 4-lane predicate (`svbool4_t`)
// to match `nxv4i32`, so `pg` is narrowed with `sve_into`.
pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")]
        fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveor_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    sveor_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    sveor_s32_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    sveor_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    sveor_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Merging (`_m`) form. The intrinsic takes a 2-lane predicate (`svbool2_t`)
// to match `nxv2i64`, so `pg` is narrowed with `sve_into`.
pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")]
        fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveor_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    sveor_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    sveor_s64_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    sveor_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    sveor_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Unsigned wrapper: XOR operates on bit patterns only, so the operands are
// reinterpreted as signed, the signed variant is reused, and the result is
// reinterpreted back — no value change can occur.
pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret between same-width lanes.
    unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    sveor_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    sveor_u8_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    sveor_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    sveor_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Unsigned wrapper: XOR is sign-agnostic, so reinterpret-to-signed,
// delegate, reinterpret-back is value-preserving.
pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret between same-width lanes.
    unsafe { sveor_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    sveor_u16_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    sveor_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Unsigned wrapper: XOR is sign-agnostic, so reinterpret-to-signed,
// delegate, reinterpret-back is value-preserving.
pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret between same-width lanes.
    unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    sveor_u32_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    sveor_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    sveor_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Unsigned wrapper: XOR is sign-agnostic, so reinterpret-to-signed,
// delegate, reinterpret-back is value-preserving.
pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret between same-width lanes.
    unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar (`_n_`) form: broadcast `op2`, then delegate.
pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// "Don't care" (`_x`) form: reuses the merging form.
pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    sveor_u64_m(pg, op1, op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_x` form: broadcast then delegate.
pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Zeroing (`_z`) form: inactive lanes of `op1` are zeroed via `svsel` first.
pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Bitwise exclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eor))]
// Scalar `_z` form: broadcast then delegate.
pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    sveor_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// EORV reduction: XORs the predicated lanes of `op` down to a single scalar.
// `svbool_t` already matches the 16-lane predicate of `nxv16i8`, so `pg` is
// passed through unconverted.
pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")]
        fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveorv_s8(pg, op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// EORV reduction over 16-bit lanes; predicate narrowed to 8 lanes via `sve_into`.
pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")]
        fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveorv_s16(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// EORV reduction over 32-bit lanes; predicate narrowed to 4 lanes via `sve_into`.
pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")]
        fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveorv_s32(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// EORV reduction over 64-bit lanes; predicate narrowed to 2 lanes via `sve_into`.
pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")]
        fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic, gated by `target_feature`.
    unsafe { _sveorv_s64(pg.sve_into(), op) }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// Unsigned wrapper: XOR reduction is sign-agnostic, so reinterpret, delegate
// to the signed reduction, reinterpret the scalar back.
pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    // SAFETY: `as_signed`/`as_unsigned` reinterpret between same-width types.
    unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// Unsigned wrapper over `sveorv_s16` (see `sveorv_u8`).
pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    // SAFETY: bit-pattern reinterpretation only.
    unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// Unsigned wrapper over `sveorv_s32` (see `sveorv_u8`).
pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    // SAFETY: bit-pattern reinterpretation only.
    unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise exclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(eorv))]
// Unsigned wrapper over `sveorv_s64` (see `sveorv_u8`).
pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    // SAFETY: bit-pattern reinterpretation only.
    unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() }
}
10584#[doc = "Floating-point exponential accelerator"]
10585#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"]
10586#[inline(always)]
10587#[target_feature(enable = "sve")]
10588#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10589#[cfg_attr(test, assert_instr(fexpa))]
10590pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t {
10591    unsafe extern "unadjusted" {
10592        #[cfg_attr(
10593            target_arch = "aarch64",
10594            link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32 "
10595        )]
10596        fn _svexpa_f32(op: svint32_t) -> svfloat32_t;
10597    }
10598    unsafe { _svexpa_f32(op.as_signed()) }
10599}
10600#[doc = "Floating-point exponential accelerator"]
10601#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"]
10602#[inline(always)]
10603#[target_feature(enable = "sve")]
10604#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10605#[cfg_attr(test, assert_instr(fexpa))]
10606pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
10607    unsafe extern "unadjusted" {
10608        #[cfg_attr(
10609            target_arch = "aarch64",
10610            link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64 "
10611        )]
10612        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
10613    }
10614    unsafe { _svexpa_f64(op.as_signed()) }
10615}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
// EXT: extracts a vector starting at element IMM3 of the `op1:op2` pair.
// IMM3 is a const generic so it reaches the intrinsic as a compile-time
// constant; the 0..=63 bound is element-counted for 32-bit lanes (presumably
// derived from the architectural maximum vector length — confirm against ACLE).
pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    static_assert_range!(IMM3, 0..=63);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic; IMM3 is range-checked above.
    unsafe { _svext_f32(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
// EXT for 64-bit float lanes; IMM3 counts elements, bound 0..=31 (half the
// 32-bit bound, consistent with the doubled element width).
pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    static_assert_range!(IMM3, 0..=31);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic; IMM3 is range-checked above.
    unsafe { _svext_f64(op1, op2, IMM3) }
}
#[doc = "Extract vector from pair of vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
// EXT for 8-bit lanes; IMM3 counts elements, bound 0..=255 (the widest range
// in the svext family, matching the smallest element size).
pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
    static_assert_range!(IMM3, 0..=255);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
    }
    // SAFETY: FFI call to the AArch64 SVE intrinsic; IMM3 is range-checked above.
    unsafe { _svext_s8(op1, op2, IMM3) }
}
10658#[doc = "Extract vector from pair of vectors"]
10659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
10660#[inline(always)]
10661#[target_feature(enable = "sve")]
10662#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10663#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10664pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
10665    static_assert_range!(IMM3, 0..=127);
10666    unsafe extern "unadjusted" {
10667        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
10668        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
10669    }
10670    unsafe { _svext_s16(op1, op2, IMM3) }
10671}
10672#[doc = "Extract vector from pair of vectors"]
10673#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
10674#[inline(always)]
10675#[target_feature(enable = "sve")]
10676#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10677#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10678pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
10679    static_assert_range!(IMM3, 0..=63);
10680    unsafe extern "unadjusted" {
10681        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
10682        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
10683    }
10684    unsafe { _svext_s32(op1, op2, IMM3) }
10685}
10686#[doc = "Extract vector from pair of vectors"]
10687#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
10688#[inline(always)]
10689#[target_feature(enable = "sve")]
10690#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10691#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10692pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
10693    static_assert_range!(IMM3, 0..=31);
10694    unsafe extern "unadjusted" {
10695        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
10696        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
10697    }
10698    unsafe { _svext_s64(op1, op2, IMM3) }
10699}
10700#[doc = "Extract vector from pair of vectors"]
10701#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
10702#[inline(always)]
10703#[target_feature(enable = "sve")]
10704#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10705#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10706pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
10707    static_assert_range!(IMM3, 0..=255);
10708    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
10709}
10710#[doc = "Extract vector from pair of vectors"]
10711#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
10712#[inline(always)]
10713#[target_feature(enable = "sve")]
10714#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10715#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10716pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
10717    static_assert_range!(IMM3, 0..=127);
10718    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
10719}
10720#[doc = "Extract vector from pair of vectors"]
10721#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
10722#[inline(always)]
10723#[target_feature(enable = "sve")]
10724#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10725#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10726pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
10727    static_assert_range!(IMM3, 0..=63);
10728    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
10729}
10730#[doc = "Extract vector from pair of vectors"]
10731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
10732#[inline(always)]
10733#[target_feature(enable = "sve")]
10734#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
10735#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
10736pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
10737    static_assert_range!(IMM3, 0..=31);
10738    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
10739}
// --- Signed lane extension family: SXTB/SXTH/SXTW under predicate control.
// Each base `_m` shim forwards to the LLVM intrinsic; `pg.sve_into()` narrows the
// generic `svbool_t` predicate to the element-width-specific predicate type the
// intrinsic signature requires (svbool8_t / svbool4_t / svbool2_t).
// The `_x` wrappers pass `op` itself as the `inactive` operand; the `_z` wrappers
// pass a zero splat, which yields zeroed lanes wherever `inactive` is selected.
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svextb_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "Don't care" form implemented by reusing `op` as the inactive value.
    svextb_s16_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Zeroing form: inactive lanes come from an all-zero vector.
    svextb_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")]
        fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svextb_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svextb_s32_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svextb_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")]
        fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svexth_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svexth_s32_m(op, pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svexth_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")]
        fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextb_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextb_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtb))]
pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextb_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")]
        fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svexth_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svexth_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxth))]
pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svexth_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")]
        fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextw_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextw_s64_m(op, pg, op)
}
#[doc = "Sign-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sxtw))]
pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svextw_s64_m(svdup_n_s64(0), pg, op)
}
// --- Unsigned lane extension family: UXTB/UXTH/UXTW under predicate control.
// The LLVM intrinsics are declared over signed vector types, so each `_m` shim
// bit-reinterprets its unsigned operands with `as_signed()` on the way in and
// `as_unsigned()` on the way out; `pg.sve_into()` narrows the generic `svbool_t`
// to the element-width-specific predicate type. `_x` reuses `op` as the inactive
// operand; `_z` passes a zero splat for the inactive lanes.
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")]
        fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    unsafe { _svextb_u16_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // "Don't care" form implemented by reusing `op` as the inactive value.
    svextb_u16_m(op, pg, op)
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Zeroing form: inactive lanes come from an all-zero vector.
    svextb_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv4i32")]
        fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svextb_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svextb_u32_m(op, pg, op)
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svextb_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")]
        fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    unsafe { _svexth_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svexth_u32_m(op, pg, op)
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svexth_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")]
        fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextb_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextb_u64_m(op, pg, op)
}
#[doc = "Zero-extend the low 8 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtb))]
pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextb_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")]
        fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svexth_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svexth_u64_m(op, pg, op)
}
#[doc = "Zero-extend the low 16 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxth))]
pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svexth_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Zero-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtw))]
pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")]
        fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    unsafe { _svextw_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Zero-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtw))]
pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextw_u64_m(op, pg, op)
}
#[doc = "Zero-extend the low 32 bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uxtw))]
pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svextw_u64_m(svdup_n_u64(0), pg, op)
}
// --- svget2 family: select vector IMM_INDEX (0 or 1) out of a two-vector tuple.
// Pure compile-time selection via the `sve_tuple_get` compiler intrinsic; the
// index is validated by `static_assert_range!` before the unsafe call.
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
// --- svget3 family: select vector IMM_INDEX (0..=2) out of a three-vector tuple.
// Same shape as svget2, with the index range widened for the x3 tuple types.
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=2);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
11256#[doc = "Extract one vector from a tuple of three vectors"]
11257#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"]
11258#[inline(always)]
11259#[target_feature(enable = "sve")]
11260#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11261pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t {
11262    static_assert_range!(IMM_INDEX, 0..=2);
11263    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
11264}
11265#[doc = "Extract one vector from a tuple of three vectors"]
11266#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"]
11267#[inline(always)]
11268#[target_feature(enable = "sve")]
11269#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11270pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t {
11271    static_assert_range!(IMM_INDEX, 0..=2);
11272    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
11273}
11274#[doc = "Extract one vector from a tuple of three vectors"]
11275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"]
11276#[inline(always)]
11277#[target_feature(enable = "sve")]
11278#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11279pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t {
11280    static_assert_range!(IMM_INDEX, 0..=2);
11281    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
11282}
11283#[doc = "Extract one vector from a tuple of three vectors"]
11284#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"]
11285#[inline(always)]
11286#[target_feature(enable = "sve")]
11287#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
11288pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t {
11289    static_assert_range!(IMM_INDEX, 0..=2);
11290    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
11291}
// svget4_*: extract one vector, selected by the const generic IMM_INDEX,
// from a tuple of four SVE vectors. Identical in shape to the svget2/svget3
// families above, except that the valid index range widens to 0..=3.
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t {
    // Indices 0..=3 are the only valid positions in an x4 tuple.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX was validated at compile time above.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
#[doc = "Extract one vector from a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) }
}
// svindex_*: create a linear series vector from a scalar `base` and `step`
// (Arm INDEX instruction). The signed variants bind the matching
// `llvm.aarch64.sve.index.nxv*` intrinsic directly; the unsigned variants
// reuse the same-width signed implementation through the lossless
// `as_signed`/`as_unsigned` bit reinterpretations, since INDEX is
// sign-agnostic at the bit level.
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_s8(base: i8, step: i8) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")]
        fn _svindex_s8(base: i8, step: i8) -> svint8_t;
    }
    // SAFETY: the intrinsic takes plain scalar arguments; the `sve` target
    // feature required by it is enabled on this function.
    unsafe { _svindex_s8(base, step) }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_s16(base: i16, step: i16) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")]
        fn _svindex_s16(base: i16, step: i16) -> svint16_t;
    }
    unsafe { _svindex_s16(base, step) }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_s32(base: i32, step: i32) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")]
        fn _svindex_s32(base: i32, step: i32) -> svint32_t;
    }
    unsafe { _svindex_s32(base, step) }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_s64(base: i64, step: i64) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")]
        fn _svindex_s64(base: i64, step: i64) -> svint64_t;
    }
    unsafe { _svindex_s64(base, step) }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_u8(base: u8, step: u8) -> svuint8_t {
    unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_u16(base: u16, step: u16) -> svuint16_t {
    unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_u32(base: u32, step: u32) -> svuint32_t {
    unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() }
}
#[doc = "Create linear series"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(index))]
pub fn svindex_u64(base: u64, step: u64) -> svuint64_t {
    unsafe { svindex_s64(base.as_signed(), step.as_signed()).as_unsigned() }
}
// svinsr_n_*: insert a scalar into a shifted vector (Arm INSR instruction).
// Float and signed variants bind the matching `llvm.aarch64.sve.insr.nxv*`
// intrinsic directly; unsigned variants are bit-reinterpreting wrappers over
// the same-width signed implementation, since the operation does not depend
// on signedness.
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")]
        fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t;
    }
    // SAFETY: the intrinsic only reads its two operands; the `sve` target
    // feature it requires is enabled on this function.
    unsafe { _svinsr_n_f32(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")]
        fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t;
    }
    unsafe { _svinsr_n_f64(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")]
        fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t;
    }
    unsafe { _svinsr_n_s8(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")]
        fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t;
    }
    unsafe { _svinsr_n_s16(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")]
        fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t;
    }
    unsafe { _svinsr_n_s32(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")]
        fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t;
    }
    unsafe { _svinsr_n_s64(op1, op2) }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Insert scalar in shifted vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(insr))]
pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
// svlasta_*: reduce a vector to the scalar "element after last" under the
// governing predicate `pg` (Arm LASTA). The 8-bit variants pass the
// full-width `svbool_t` predicate straight through; variants with wider
// elements first convert it to the matching svbool2/4/8_t layout with
// `sve_into`, as required by the LLVM intrinsic signature. Unsigned integer
// variants are bit-reinterpreting wrappers over the signed ones.
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")]
        fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // SAFETY: `sve_into` produces the predicate layout the intrinsic
    // declares; the `sve` target feature is enabled on this function.
    unsafe { _svlasta_f32(pg.sve_into(), op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")]
        fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    unsafe { _svlasta_f64(pg.sve_into(), op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")]
        fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // No predicate conversion: the 8-bit-element intrinsic takes the
    // full-width svbool_t directly.
    unsafe { _svlasta_s8(pg, op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")]
        fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    unsafe { _svlasta_s16(pg.sve_into(), op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")]
        fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    unsafe { _svlasta_s32(pg.sve_into(), op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")]
        fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    unsafe { _svlasta_s64(pg.sve_into(), op) }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    unsafe { svlasta_s16(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract element after last"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lasta))]
pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() }
}
// svlastb_*: reduce a vector to the scalar last element under the governing
// predicate `pg` (Arm LASTB). Structurally identical to the svlasta_*
// family above: 8-bit variants pass `svbool_t` through unchanged,
// wider-element variants narrow the predicate with `sve_into`, and unsigned
// integer variants wrap the signed implementations via bit reinterpretation.
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")]
        fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // SAFETY: `sve_into` produces the predicate layout the intrinsic
    // declares; the `sve` target feature is enabled on this function.
    unsafe { _svlastb_f32(pg.sve_into(), op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")]
        fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    unsafe { _svlastb_f64(pg.sve_into(), op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")]
        fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // No predicate conversion: the 8-bit-element intrinsic takes the
    // full-width svbool_t directly.
    unsafe { _svlastb_s8(pg, op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")]
        fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    unsafe { _svlastb_s16(pg.sve_into(), op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")]
        fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    unsafe { _svlastb_s32(pg.sve_into(), op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")]
        fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    unsafe { _svlastb_s64(pg.sve_into(), op) }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Extract last element"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lastb))]
pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")]
        fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
    }
    // The LLVM intrinsic expects the 32-bit-element svbool4_t predicate
    // layout, so the byte-granular svbool_t is converted with `sve_into`.
    // The caller upholds the pointer-validity contract documented above.
    _svld1_f32(pg.sve_into(), base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for a predicated contiguous load of f64 elements
        // (`nxv2f64`: scalable vector, 2 x f64 per 128-bit granule).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")]
        fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_f64(pg.sve_into(), base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for a predicated contiguous load of i8 elements
        // (`nxv16i8`: 16 x i8 per 128-bit granule).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")]
        fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
    }
    // Byte elements use the full-width predicate, so `pg` is passed
    // through directly — no `sve_into` conversion is needed here.
    _svld1_s8(pg, base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for a predicated contiguous load of i16 elements
        // (`nxv8i16`: 8 x i16 per 128-bit granule).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")]
        fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
    }
    // Convert the predicate to the 16-bit-element type (`svbool8_t`).
    _svld1_s16(pg.sve_into(), base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for a predicated contiguous load of i32 elements
        // (`nxv4i32`: 4 x i32 per 128-bit granule).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")]
        fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
    }
    // Convert the predicate to the 32-bit-element type (`svbool4_t`).
    _svld1_s32(pg.sve_into(), base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
    unsafe extern "unadjusted" {
        // LLVM intrinsic for a predicated contiguous load of i64 elements
        // (`nxv2i64`: 2 x i64 per 128-bit granule).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")]
        fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_s64(pg.sve_into(), base)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
    // Delegate to the signed 8-bit load; `as_signed` casts the pointer and
    // `as_unsigned` converts the result back to the unsigned vector type.
    svld1_s8(pg, base.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
    // Delegate to the signed 16-bit load and convert back to unsigned.
    svld1_s16(pg, base.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
    // Delegate to the signed 32-bit load and convert back to unsigned.
    svld1_s32(pg, base.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
    // Delegate to the signed 64-bit load and convert back to unsigned.
    svld1_s64(pg, base.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32index_f32(
    pg: svbool_t,
    base: *const f32,
    indices: svint32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Gather load with signed 32-bit indices: the `sxtw.index` form
        // sign-extends each index to 64 bits; per the Arm `…index`
        // convention, indices are in elements (scaled), not bytes.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32"
        )]
        fn _svld1_gather_s32index_f32(
            pg: svbool4_t,
            base: *const f32,
            indices: svint32_t,
        ) -> svfloat32_t;
    }
    // Convert the predicate to the 32-bit-element type (`svbool4_t`).
    _svld1_gather_s32index_f32(pg.sve_into(), base, indices)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32index_s32(
    pg: svbool_t,
    base: *const i32,
    indices: svint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Gather load with signed 32-bit indices (`sxtw` = sign-extend to
        // 64 bits; `index` = scaled by element size).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32"
        )]
        fn _svld1_gather_s32index_s32(
            pg: svbool4_t,
            base: *const i32,
            indices: svint32_t,
        ) -> svint32_t;
    }
    // Convert the predicate to the 32-bit-element type (`svbool4_t`).
    _svld1_gather_s32index_s32(pg.sve_into(), base, indices)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32index_u32(
    pg: svbool_t,
    base: *const u32,
    indices: svint32_t,
) -> svuint32_t {
    // Delegate to the signed-element gather; only the pointer/result types
    // change between the signed and unsigned views.
    svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64index_f64(
    pg: svbool_t,
    base: *const f64,
    indices: svint64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // Gather load with 64-bit indices; `index` means the indices are
        // scaled by the element size (no extension needed at 64 bits, so
        // there is no `sxtw`/`uxtw` in the intrinsic name).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64"
        )]
        fn _svld1_gather_s64index_f64(
            pg: svbool2_t,
            base: *const f64,
            indices: svint64_t,
        ) -> svfloat64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_gather_s64index_f64(pg.sve_into(), base, indices)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64index_s64(
    pg: svbool_t,
    base: *const i64,
    indices: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        // Gather load with 64-bit, element-scaled (`index`) indices.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64"
        )]
        fn _svld1_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i64,
            indices: svint64_t,
        ) -> svint64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_gather_s64index_s64(pg.sve_into(), base, indices)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64index_u64(
    pg: svbool_t,
    base: *const u64,
    indices: svint64_t,
) -> svuint64_t {
    // Delegate to the signed-element gather; only the pointer/result types
    // change between the signed and unsigned views.
    svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32index_f32(
    pg: svbool_t,
    base: *const f32,
    indices: svuint32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Gather load with unsigned 32-bit indices: the `uxtw.index` form
        // zero-extends each index to 64 bits and scales by element size.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32"
        )]
        fn _svld1_gather_u32index_f32(
            pg: svbool4_t,
            base: *const f32,
            indices: svint32_t,
        ) -> svfloat32_t;
    }
    // The FFI signature takes `svint32_t`, so the unsigned indices are
    // type-converted with `as_signed`; the `uxtw` intrinsic still treats
    // them as unsigned when extending.
    _svld1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32index_s32(
    pg: svbool_t,
    base: *const i32,
    indices: svuint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Gather load with unsigned 32-bit indices (`uxtw` = zero-extend
        // to 64 bits; `index` = scaled by element size).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32"
        )]
        fn _svld1_gather_u32index_s32(
            pg: svbool4_t,
            base: *const i32,
            indices: svint32_t,
        ) -> svint32_t;
    }
    // `as_signed` only satisfies the FFI signature; the `uxtw` form still
    // interprets the index bits as unsigned.
    _svld1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32index_u32(
    pg: svbool_t,
    base: *const u32,
    indices: svuint32_t,
) -> svuint32_t {
    // Delegate to the signed-element variant (which already takes unsigned
    // indices) and convert the result back to the unsigned vector type.
    svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64index_f64(
    pg: svbool_t,
    base: *const f64,
    indices: svuint64_t,
) -> svfloat64_t {
    // At 64 bits no extension is involved, so the unsigned-index variant
    // simply reinterprets the indices and reuses the signed-index binding.
    svld1_gather_s64index_f64(pg, base, indices.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64index_s64(
    pg: svbool_t,
    base: *const i64,
    indices: svuint64_t,
) -> svint64_t {
    // Reuse the signed-index binding; `as_signed` reinterprets the 64-bit
    // indices for the call (no extension happens at this width).
    svld1_gather_s64index_s64(pg, base, indices.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64index_u64(
    pg: svbool_t,
    base: *const u64,
    indices: svuint64_t,
) -> svuint64_t {
    // Fully-unsigned variant: convert pointer, indices, and result through
    // the signed-element binding.
    svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32offset_f32(
    pg: svbool_t,
    base: *const f32,
    offsets: svint32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Gather load with signed 32-bit byte offsets (`sxtw` sign-extends;
        // no `.index` suffix, so offsets are unscaled bytes, unlike the
        // `…index` variants).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32"
        )]
        fn _svld1_gather_s32offset_f32(
            pg: svbool4_t,
            base: *const f32,
            offsets: svint32_t,
        ) -> svfloat32_t;
    }
    // Convert the predicate to the 32-bit-element type (`svbool4_t`).
    _svld1_gather_s32offset_f32(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i32,
    offsets: svint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Gather load with signed 32-bit, unscaled byte offsets (`sxtw`,
        // no `.index` scaling).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32"
        )]
        fn _svld1_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i32,
            offsets: svint32_t,
        ) -> svint32_t;
    }
    // Convert the predicate to the 32-bit-element type (`svbool4_t`).
    _svld1_gather_s32offset_s32(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_s32offset_u32(
    pg: svbool_t,
    base: *const u32,
    offsets: svint32_t,
) -> svuint32_t {
    // Delegate to the signed-element gather; only the pointer/result types
    // change between the signed and unsigned views.
    svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64offset_f64(
    pg: svbool_t,
    base: *const f64,
    offsets: svint64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // Gather load with 64-bit, unscaled byte offsets (no extension or
        // `.index` scaling suffix in the intrinsic name).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64"
        )]
        fn _svld1_gather_s64offset_f64(
            pg: svbool2_t,
            base: *const f64,
            offsets: svint64_t,
        ) -> svfloat64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_gather_s64offset_f64(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i64,
    offsets: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        // Gather load with 64-bit, unscaled byte offsets.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64"
        )]
        fn _svld1_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i64,
            offsets: svint64_t,
        ) -> svint64_t;
    }
    // Convert the predicate to the 64-bit-element type (`svbool2_t`).
    _svld1_gather_s64offset_s64(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u64,
    offsets: svint64_t,
) -> svuint64_t {
    // Delegate to the signed-element gather; only the pointer/result types
    // change between the signed and unsigned views.
    svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32offset_f32(
    pg: svbool_t,
    base: *const f32,
    offsets: svuint32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Gather load with unsigned 32-bit byte offsets (`uxtw`
        // zero-extends; no `.index` scaling).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32"
        )]
        fn _svld1_gather_u32offset_f32(
            pg: svbool4_t,
            base: *const f32,
            offsets: svint32_t,
        ) -> svfloat32_t;
    }
    // `as_signed` only satisfies the FFI signature; the `uxtw` form still
    // interprets the offset bits as unsigned.
    _svld1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i32,
    offsets: svuint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Gather load with unsigned 32-bit byte offsets (`uxtw`
        // zero-extends; no `.index` scaling).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32"
        )]
        fn _svld1_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i32,
            offsets: svint32_t,
        ) -> svint32_t;
    }
    // `as_signed` only satisfies the FFI signature; the `uxtw` form still
    // interprets the offset bits as unsigned.
    _svld1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u32,
    offsets: svuint32_t,
) -> svuint32_t {
    // Delegate to the signed-element variant (which already takes unsigned
    // offsets) and convert the result back to the unsigned vector type.
    svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64offset_f64(
    pg: svbool_t,
    base: *const f64,
    offsets: svuint64_t,
) -> svfloat64_t {
    // At 64 bits no extension is involved, so the unsigned-offset variant
    // simply reinterprets the offsets and reuses the signed-offset binding.
    svld1_gather_s64offset_f64(pg, base, offsets.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64offset_s64(
    pg: svbool_t,
    base: *const i64,
    offsets: svuint64_t,
) -> svint64_t {
    // Delegate to the signed-offset variant; `as_signed` is a bitwise
    // reinterpret of the offset vector only.
    svld1_gather_s64offset_s64(pg, base, offsets.as_signed())
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u64,
    offsets: svuint64_t,
) -> svuint64_t {
    // Fully-unsigned form: bit-cast the base pointer's element type and the
    // offset vector to signed, call the signed implementation, then bit-cast
    // the loaded vector back to unsigned. No value is changed by these casts.
    svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u32base_offset_f32(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u32base_offset_s32(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u64base_offset_f64(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Gather directly from the per-lane base addresses: the `_offset` form
    // with a zero byte offset.
    svld1_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_index_f32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svfloat32_t {
    // `index` counts elements, not bytes: `<< 2` scales by the 4-byte element
    // size. The shift amount is the constant 2 (< 64), so `unchecked_shl`'s
    // UB condition (shift >= bit width) cannot trigger.
    svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    // Scale the element index to a byte offset for 4-byte elements (`<< 2`);
    // the constant shift of 2 cannot hit `unchecked_shl`'s UB condition.
    svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svuint32_t {
    // Scale the element index to a byte offset for 4-byte elements (`<< 2`);
    // the constant shift of 2 cannot hit `unchecked_shl`'s UB condition.
    svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_index_f64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svfloat64_t {
    // Scale the element index to a byte offset for 8-byte elements (`<< 3`);
    // the constant shift of 3 cannot hit `unchecked_shl`'s UB condition.
    svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Scale the element index to a byte offset for 8-byte elements (`<< 3`);
    // the constant shift of 3 cannot hit `unchecked_shl`'s UB condition.
    svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index to a byte offset for 8-byte elements (`<< 3`);
    // the constant shift of 3 cannot hit `unchecked_shl`'s UB condition.
    svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_offset_f32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svfloat32_t {
    // Raw binding to the LLVM gather intrinsic; it is declared over a signed
    // base vector and a 4-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32"
        )]
        fn _svld1_gather_u32base_offset_f32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svfloat32_t;
    }
    // `sve_into` converts the generic predicate to the 32-bit-element form
    // (`svbool4_t`); `as_signed` bit-casts the base vector to match the
    // intrinsic's signature.
    _svld1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Raw binding to the LLVM gather intrinsic; it is declared over a signed
    // base vector and a 4-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32"
        )]
        fn _svld1_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svint32_t;
    }
    // Narrow the generic predicate to `svbool4_t` and bit-cast the base
    // vector to signed to match the intrinsic's signature.
    _svld1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Same gather as the `_s32` variant; only the element type of the result
    // differs, so reinterpret the loaded vector as unsigned.
    svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_offset_f64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svfloat64_t {
    // Raw binding to the LLVM gather intrinsic; it is declared over a signed
    // base vector and a 2-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64"
        )]
        fn _svld1_gather_u64base_offset_f64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svfloat64_t;
    }
    // Narrow the generic predicate to the 64-bit-element form (`svbool2_t`)
    // and bit-cast the base vector to signed to match the intrinsic.
    _svld1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Raw binding to the LLVM gather intrinsic; it is declared over a signed
    // base vector and a 2-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64"
        )]
        fn _svld1_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svint64_t;
    }
    // Narrow the generic predicate to `svbool2_t` and bit-cast the base
    // vector to signed to match the intrinsic's signature.
    _svld1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Same gather as the `_s64` variant; only the element type of the result
    // differs, so reinterpret the loaded vector as unsigned.
    svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
    // `vnum` counts whole vectors (see the safety note above): advance `base`
    // by `vnum` times the per-vector 32-bit element count (`svcntw()`), then
    // perform a plain predicated load.
    svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
    // Advance `base` by `vnum` whole vectors (per-vector 64-bit element count
    // from `svcntd()`), then perform a plain predicated load.
    svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
    // Advance `base` by `vnum` whole vectors (per-vector 8-bit element count
    // from `svcntb()`), then perform a plain predicated load.
    svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
    // Advance `base` by `vnum` whole vectors (per-vector 16-bit element count
    // from `svcnth()`), then perform a plain predicated load.
    svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
    // Advance `base` by `vnum` whole vectors (per-vector 32-bit element count
    // from `svcntw()`), then perform a plain predicated load.
    svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
    // Advance `base` by `vnum` whole vectors (per-vector 64-bit element count
    // from `svcntd()`), then perform a plain predicated load.
    svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
    // Advance `base` by `vnum` whole vectors (per-vector 8-bit element count
    // from `svcntb()`), then perform a plain predicated load.
    svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
    // Advance `base` by `vnum` whole vectors (per-vector 16-bit element count
    // from `svcnth()`), then perform a plain predicated load.
    svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
    // Advance `base` by `vnum` whole vectors (per-vector 32-bit element count
    // from `svcntw()`), then perform a plain predicated load.
    svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1d))]
pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
    // Advance `base` by `vnum` whole vectors (per-vector 64-bit element count
    // from `svcntd()`), then perform a plain predicated load.
    svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1row))]
pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")]
        fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
    }
    // Convert the generic predicate to the 4-lane (32-bit element) predicate
    // type required by the intrinsic signature.
    _svld1ro_f32(pg.sve_into(), base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rod))]
pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")]
        fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
    }
    // Convert the generic predicate to the 2-lane (64-bit element) predicate
    // type required by the intrinsic signature.
    _svld1ro_f64(pg.sve_into(), base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rob))]
pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")]
        fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t;
    }
    // 8-bit elements use `svbool_t` directly — no predicate conversion needed.
    _svld1ro_s8(pg, base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1roh))]
pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")]
        fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
    }
    // Convert the generic predicate to the 8-lane (16-bit element) predicate
    // type required by the intrinsic signature.
    _svld1ro_s16(pg.sve_into(), base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1row))]
pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")]
        fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
    }
    // Convert the generic predicate to the 4-lane (32-bit element) predicate
    // type required by the intrinsic signature.
    _svld1ro_s32(pg.sve_into(), base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rod))]
pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")]
        fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
    }
    // Convert the generic predicate to the 2-lane (64-bit element) predicate
    // type required by the intrinsic signature.
    _svld1ro_s64(pg.sve_into(), base)
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rob))]
pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1ro_s8(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1roh))]
pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1ro_s16(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1row))]
pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1ro_s32(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 256 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rod))]
pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1ro_s64(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqw))]
pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")]
        fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
    }
    // Convert the generic predicate to the 4-lane (32-bit element) predicate
    // type required by the intrinsic signature.
    _svld1rq_f32(pg.sve_into(), base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqd))]
pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")]
        fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
    }
    // Convert the generic predicate to the 2-lane (64-bit element) predicate
    // type required by the intrinsic signature.
    _svld1rq_f64(pg.sve_into(), base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqb))]
pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")]
        fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t;
    }
    // 8-bit elements use `svbool_t` directly — no predicate conversion needed.
    _svld1rq_s8(pg, base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqh))]
pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")]
        fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
    }
    // Convert the generic predicate to the 8-lane (16-bit element) predicate
    // type required by the intrinsic signature.
    _svld1rq_s16(pg.sve_into(), base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqw))]
pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")]
        fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
    }
    // Convert the generic predicate to the 4-lane (32-bit element) predicate
    // type required by the intrinsic signature.
    _svld1rq_s32(pg.sve_into(), base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqd))]
pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t {
    // Direct binding to the LLVM SVE intrinsic; the "unadjusted" ABI keeps
    // scalable-vector arguments in their native form.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")]
        fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
    }
    // Convert the generic predicate to the 2-lane (64-bit element) predicate
    // type required by the intrinsic signature.
    _svld1rq_s64(pg.sve_into(), base)
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqb))]
pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1rq_s8(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqh))]
pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1rq_s16(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqw))]
pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1rq_s32(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load and replicate 128 bits of data"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1rqd))]
pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
    // No separate unsigned intrinsic: reuse the signed load and
    // reinterpret the lanes (the load itself is sign-agnostic).
    svld1rq_s64(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i8,
    offsets: svint32_t,
) -> svint32_t {
    // Gather variant with signed 32-bit offsets; `sxtw` in the link name
    // selects the form that sign-extends each offset to 64 bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8"
        )]
        fn _svld1sb_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // The intrinsic yields 8-bit lanes (nxv4i8); `simd_cast` widens each
    // signed lane to 32 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sb_gather_s32offset_s32(pg.sve_into(), base, offsets))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i16,
    offsets: svint32_t,
) -> svint32_t {
    // Gather variant with signed 32-bit offsets; `sxtw` in the link name
    // selects the form that sign-extends each offset to 64 bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16"
        )]
        fn _svld1sh_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic yields 16-bit lanes (nxv4i16); `simd_cast` widens each
    // signed lane to 32 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32offset_s32(pg.sve_into(), base, offsets))
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_s32offset_u32(
    pg: svbool_t,
    base: *const i8,
    offsets: svint32_t,
) -> svuint32_t {
    // Same sign-extending gather as the s32-returning variant; only the
    // result is reinterpreted as unsigned lanes.
    svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_s32offset_u32(
    pg: svbool_t,
    base: *const i16,
    offsets: svint32_t,
) -> svuint32_t {
    // Same sign-extending gather as the s32-returning variant; only the
    // result is reinterpreted as unsigned lanes.
    svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i8,
    offsets: svint64_t,
) -> svint64_t {
    // Gather with 64-bit offsets: no `sxtw`/`uxtw` suffix needed since the
    // offsets are already full-width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8"
        )]
        fn _svld1sb_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // The intrinsic yields 8-bit lanes (nxv2i8); `simd_cast` widens each
    // signed lane to 64 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sb_gather_s64offset_s64(pg.sve_into(), base, offsets))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i16,
    offsets: svint64_t,
) -> svint64_t {
    // Gather with 64-bit offsets: no `sxtw`/`uxtw` suffix needed since the
    // offsets are already full-width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
        )]
        fn _svld1sh_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // The intrinsic yields 16-bit lanes (nxv2i16); `simd_cast` widens each
    // signed lane to 64 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i32,
    offsets: svint64_t,
) -> svint64_t {
    // Gather with 64-bit offsets: no `sxtw`/`uxtw` suffix needed since the
    // offsets are already full-width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32"
        )]
        fn _svld1sw_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // The intrinsic yields 32-bit lanes (nxv2i32); `simd_cast` widens each
    // signed lane to 64 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64offset_s64(pg.sve_into(), base, offsets))
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i8,
    offsets: svint64_t,
) -> svuint64_t {
    // Same sign-extending gather as the s64-returning variant; only the
    // result is reinterpreted as unsigned lanes.
    svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i16,
    offsets: svint64_t,
) -> svuint64_t {
    // Same sign-extending gather as the s64-returning variant; only the
    // result is reinterpreted as unsigned lanes.
    svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i32,
    offsets: svint64_t,
) -> svuint64_t {
    // Same sign-extending gather as the s64-returning variant; only the
    // result is reinterpreted as unsigned lanes.
    svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i8,
    offsets: svuint32_t,
) -> svint32_t {
    // Gather variant with unsigned 32-bit offsets; `uxtw` in the link name
    // selects the form that zero-extends each offset to 64 bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8"
        )]
        fn _svld1sb_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // The intrinsic takes signed offset lanes, so the unsigned offsets are
    // reinterpreted; the loaded 8-bit lanes (nxv4i8) are then widened by
    // `simd_cast` to 32 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i16,
    offsets: svuint32_t,
) -> svint32_t {
    // Gather variant with unsigned 32-bit offsets; `uxtw` in the link name
    // selects the form that zero-extends each offset to 64 bits.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16"
        )]
        fn _svld1sh_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic takes signed offset lanes, so the unsigned offsets are
    // reinterpreted; the loaded 16-bit lanes (nxv4i16) are then widened by
    // `simd_cast` to 32 bits, producing the documented sign-extension.
    crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
13483#[doc = "Load 8-bit data and sign-extend"]
13484#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"]
13485#[doc = "## Safety"]
13486#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13487#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13488#[inline(always)]
13489#[target_feature(enable = "sve")]
13490#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13491#[cfg_attr(test, assert_instr(ld1sb))]
13492pub unsafe fn svld1sb_gather_u32offset_u32(
13493    pg: svbool_t,
13494    base: *const i8,
13495    offsets: svuint32_t,
13496) -> svuint32_t {
13497    svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
13498}
13499#[doc = "Load 16-bit data and sign-extend"]
13500#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"]
13501#[doc = "## Safety"]
13502#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13503#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13504#[inline(always)]
13505#[target_feature(enable = "sve")]
13506#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13507#[cfg_attr(test, assert_instr(ld1sh))]
13508pub unsafe fn svld1sh_gather_u32offset_u32(
13509    pg: svbool_t,
13510    base: *const i16,
13511    offsets: svuint32_t,
13512) -> svuint32_t {
13513    svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
13514}
13515#[doc = "Load 8-bit data and sign-extend"]
13516#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"]
13517#[doc = "## Safety"]
13518#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13519#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13520#[inline(always)]
13521#[target_feature(enable = "sve")]
13522#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13523#[cfg_attr(test, assert_instr(ld1sb))]
13524pub unsafe fn svld1sb_gather_u64offset_s64(
13525    pg: svbool_t,
13526    base: *const i8,
13527    offsets: svuint64_t,
13528) -> svint64_t {
13529    svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
13530}
13531#[doc = "Load 16-bit data and sign-extend"]
13532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"]
13533#[doc = "## Safety"]
13534#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13535#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13536#[inline(always)]
13537#[target_feature(enable = "sve")]
13538#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13539#[cfg_attr(test, assert_instr(ld1sh))]
13540pub unsafe fn svld1sh_gather_u64offset_s64(
13541    pg: svbool_t,
13542    base: *const i16,
13543    offsets: svuint64_t,
13544) -> svint64_t {
13545    svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
13546}
13547#[doc = "Load 32-bit data and sign-extend"]
13548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"]
13549#[doc = "## Safety"]
13550#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13551#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13552#[inline(always)]
13553#[target_feature(enable = "sve")]
13554#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13555#[cfg_attr(test, assert_instr(ld1sw))]
13556pub unsafe fn svld1sw_gather_u64offset_s64(
13557    pg: svbool_t,
13558    base: *const i32,
13559    offsets: svuint64_t,
13560) -> svint64_t {
13561    svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
13562}
13563#[doc = "Load 8-bit data and sign-extend"]
13564#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"]
13565#[doc = "## Safety"]
13566#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13567#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13568#[inline(always)]
13569#[target_feature(enable = "sve")]
13570#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13571#[cfg_attr(test, assert_instr(ld1sb))]
13572pub unsafe fn svld1sb_gather_u64offset_u64(
13573    pg: svbool_t,
13574    base: *const i8,
13575    offsets: svuint64_t,
13576) -> svuint64_t {
13577    svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
13578}
13579#[doc = "Load 16-bit data and sign-extend"]
13580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"]
13581#[doc = "## Safety"]
13582#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13583#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13584#[inline(always)]
13585#[target_feature(enable = "sve")]
13586#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13587#[cfg_attr(test, assert_instr(ld1sh))]
13588pub unsafe fn svld1sh_gather_u64offset_u64(
13589    pg: svbool_t,
13590    base: *const i16,
13591    offsets: svuint64_t,
13592) -> svuint64_t {
13593    svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
13594}
13595#[doc = "Load 32-bit data and sign-extend"]
13596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"]
13597#[doc = "## Safety"]
13598#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13599#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13600#[inline(always)]
13601#[target_feature(enable = "sve")]
13602#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13603#[cfg_attr(test, assert_instr(ld1sw))]
13604pub unsafe fn svld1sw_gather_u64offset_u64(
13605    pg: svbool_t,
13606    base: *const i32,
13607    offsets: svuint64_t,
13608) -> svuint64_t {
13609    svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
13610}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Binding to the LLVM SVE gather intrinsic; "scalar.offset" in the link
    // name indicates a single scalar offset applied to every per-lane base.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32"
        )]
        fn _svld1sb_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i8;
    }
    // The intrinsic yields one i8 per 32-bit lane (`nxv4i8`); `simd_cast`
    // widens each lane to i32 (sign-extending, per the doc summary above).
    // `bases` is reinterpreted as signed only to match the FFI signature.
    crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32base_offset_s32(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Binding to the LLVM SVE gather intrinsic; "scalar.offset" in the link
    // name indicates a single scalar offset applied to every per-lane base.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32"
        )]
        fn _svld1sh_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i16;
    }
    // The intrinsic yields one i16 per 32-bit lane (`nxv4i16`); `simd_cast`
    // widens each lane to i32 (sign-extending, per the doc summary above).
    // `bases` is reinterpreted as signed only to match the FFI signature.
    crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32base_offset_s32(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
13675#[doc = "Load 8-bit data and sign-extend"]
13676#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"]
13677#[doc = "## Safety"]
13678#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13679#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13680#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
13681#[inline(always)]
13682#[target_feature(enable = "sve")]
13683#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13684#[cfg_attr(test, assert_instr(ld1sb))]
13685pub unsafe fn svld1sb_gather_u32base_offset_u32(
13686    pg: svbool_t,
13687    bases: svuint32_t,
13688    offset: i64,
13689) -> svuint32_t {
13690    svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
13691}
13692#[doc = "Load 16-bit data and sign-extend"]
13693#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"]
13694#[doc = "## Safety"]
13695#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13696#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13697#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
13698#[inline(always)]
13699#[target_feature(enable = "sve")]
13700#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13701#[cfg_attr(test, assert_instr(ld1sh))]
13702pub unsafe fn svld1sh_gather_u32base_offset_u32(
13703    pg: svbool_t,
13704    bases: svuint32_t,
13705    offset: i64,
13706) -> svuint32_t {
13707    svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
13708}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Binding to the LLVM SVE gather intrinsic; "scalar.offset" in the link
    // name indicates a single scalar offset applied to every per-lane base.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svld1sb_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i8;
    }
    // The intrinsic yields one i8 per 64-bit lane (`nxv2i8`); `simd_cast`
    // widens each lane to i64 (sign-extending, per the doc summary above).
    // `bases` is reinterpreted as signed only to match the FFI signature.
    crate::intrinsics::simd::simd_cast(_svld1sb_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Binding to the LLVM SVE gather intrinsic; "scalar.offset" in the link
    // name indicates a single scalar offset applied to every per-lane base.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svld1sh_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i16;
    }
    // The intrinsic yields one i16 per 64-bit lane (`nxv2i16`); `simd_cast`
    // widens each lane to i64 (sign-extending, per the doc summary above).
    // `bases` is reinterpreted as signed only to match the FFI signature.
    crate::intrinsics::simd::simd_cast(_svld1sh_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Binding to the LLVM SVE gather intrinsic; "scalar.offset" in the link
    // name indicates a single scalar offset applied to every per-lane base.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svld1sw_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i32;
    }
    // The intrinsic yields one i32 per 64-bit lane (`nxv2i32`); `simd_cast`
    // widens each lane to i64 (sign-extending, per the doc summary above).
    // `bases` is reinterpreted as signed only to match the FFI signature.
    crate::intrinsics::simd::simd_cast(_svld1sw_gather_u64base_offset_s64(
        pg.sve_into(),
        bases.as_signed(),
        offset,
    ))
}
13805#[doc = "Load 8-bit data and sign-extend"]
13806#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"]
13807#[doc = "## Safety"]
13808#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13809#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13810#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
13811#[inline(always)]
13812#[target_feature(enable = "sve")]
13813#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13814#[cfg_attr(test, assert_instr(ld1sb))]
13815pub unsafe fn svld1sb_gather_u64base_offset_u64(
13816    pg: svbool_t,
13817    bases: svuint64_t,
13818    offset: i64,
13819) -> svuint64_t {
13820    svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
13821}
13822#[doc = "Load 16-bit data and sign-extend"]
13823#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"]
13824#[doc = "## Safety"]
13825#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13826#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13827#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
13828#[inline(always)]
13829#[target_feature(enable = "sve")]
13830#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13831#[cfg_attr(test, assert_instr(ld1sh))]
13832pub unsafe fn svld1sh_gather_u64base_offset_u64(
13833    pg: svbool_t,
13834    bases: svuint64_t,
13835    offset: i64,
13836) -> svuint64_t {
13837    svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
13838}
13839#[doc = "Load 32-bit data and sign-extend"]
13840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"]
13841#[doc = "## Safety"]
13842#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
13843#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
13844#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
13845#[inline(always)]
13846#[target_feature(enable = "sve")]
13847#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
13848#[cfg_attr(test, assert_instr(ld1sw))]
13849pub unsafe fn svld1sw_gather_u64base_offset_u64(
13850    pg: svbool_t,
13851    bases: svuint64_t,
13852    offset: i64,
13853) -> svuint64_t {
13854    svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
13855}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sb_gather_u32base_offset_s32(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sh_gather_u32base_offset_s32(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sb_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sh_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sb_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sh_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sw_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sb_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sh_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Equivalent to the `_offset` form with a zero byte offset.
    svld1sw_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sb))]
pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
    // Binding to the LLVM SVE predicated contiguous-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")]
        fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
    }
    // The raw load yields one i8 per 16-bit lane (`nxv8i8`); `simd_cast`
    // widens each lane to i16 (sign-extending, per the doc summary above).
    crate::intrinsics::simd::simd_cast(_svld1sb_s16(pg.sve_into(), base))
}
14002#[doc = "Load 8-bit data and sign-extend"]
14003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"]
14004#[doc = "## Safety"]
14005#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14006#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14007#[inline(always)]
14008#[target_feature(enable = "sve")]
14009#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14010#[cfg_attr(test, assert_instr(ld1sb))]
14011pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
14012    unsafe extern "unadjusted" {
14013        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")]
14014        fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
14015    }
14016    crate::intrinsics::simd::simd_cast(_svld1sb_s32(pg.sve_into(), base))
14017}
14018#[doc = "Load 16-bit data and sign-extend"]
14019#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"]
14020#[doc = "## Safety"]
14021#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14022#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14023#[inline(always)]
14024#[target_feature(enable = "sve")]
14025#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14026#[cfg_attr(test, assert_instr(ld1sh))]
14027pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
14028    unsafe extern "unadjusted" {
14029        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")]
14030        fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
14031    }
14032    crate::intrinsics::simd::simd_cast(_svld1sh_s32(pg.sve_into(), base))
14033}
14034#[doc = "Load 8-bit data and sign-extend"]
14035#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"]
14036#[doc = "## Safety"]
14037#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14038#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14039#[inline(always)]
14040#[target_feature(enable = "sve")]
14041#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14042#[cfg_attr(test, assert_instr(ld1sb))]
14043pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
14044    unsafe extern "unadjusted" {
14045        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")]
14046        fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
14047    }
14048    crate::intrinsics::simd::simd_cast(_svld1sb_s64(pg.sve_into(), base))
14049}
14050#[doc = "Load 16-bit data and sign-extend"]
14051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"]
14052#[doc = "## Safety"]
14053#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14054#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14055#[inline(always)]
14056#[target_feature(enable = "sve")]
14057#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14058#[cfg_attr(test, assert_instr(ld1sh))]
14059pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
14060    unsafe extern "unadjusted" {
14061        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")]
14062        fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
14063    }
14064    crate::intrinsics::simd::simd_cast(_svld1sh_s64(pg.sve_into(), base))
14065}
14066#[doc = "Load 32-bit data and sign-extend"]
14067#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"]
14068#[doc = "## Safety"]
14069#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14070#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14071#[inline(always)]
14072#[target_feature(enable = "sve")]
14073#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14074#[cfg_attr(test, assert_instr(ld1sw))]
14075pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
14076    unsafe extern "unadjusted" {
14077        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")]
14078        fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
14079    }
14080    crate::intrinsics::simd::simd_cast(_svld1sw_s64(pg.sve_into(), base))
14081}
14082#[doc = "Load 8-bit data and sign-extend"]
14083#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"]
14084#[doc = "## Safety"]
14085#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14086#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14087#[inline(always)]
14088#[target_feature(enable = "sve")]
14089#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14090#[cfg_attr(test, assert_instr(ld1sb))]
14091pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
14092    svld1sb_s16(pg, base).as_unsigned()
14093}
14094#[doc = "Load 8-bit data and sign-extend"]
14095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"]
14096#[doc = "## Safety"]
14097#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14098#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14099#[inline(always)]
14100#[target_feature(enable = "sve")]
14101#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14102#[cfg_attr(test, assert_instr(ld1sb))]
14103pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
14104    svld1sb_s32(pg, base).as_unsigned()
14105}
14106#[doc = "Load 16-bit data and sign-extend"]
14107#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"]
14108#[doc = "## Safety"]
14109#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14110#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14111#[inline(always)]
14112#[target_feature(enable = "sve")]
14113#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14114#[cfg_attr(test, assert_instr(ld1sh))]
14115pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
14116    svld1sh_s32(pg, base).as_unsigned()
14117}
14118#[doc = "Load 8-bit data and sign-extend"]
14119#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"]
14120#[doc = "## Safety"]
14121#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14122#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14123#[inline(always)]
14124#[target_feature(enable = "sve")]
14125#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14126#[cfg_attr(test, assert_instr(ld1sb))]
14127pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
14128    svld1sb_s64(pg, base).as_unsigned()
14129}
14130#[doc = "Load 16-bit data and sign-extend"]
14131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"]
14132#[doc = "## Safety"]
14133#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14134#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14135#[inline(always)]
14136#[target_feature(enable = "sve")]
14137#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14138#[cfg_attr(test, assert_instr(ld1sh))]
14139pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
14140    svld1sh_s64(pg, base).as_unsigned()
14141}
14142#[doc = "Load 32-bit data and sign-extend"]
14143#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"]
14144#[doc = "## Safety"]
14145#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14146#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14147#[inline(always)]
14148#[target_feature(enable = "sve")]
14149#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14150#[cfg_attr(test, assert_instr(ld1sw))]
14151pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
14152    svld1sw_s64(pg, base).as_unsigned()
14153}
14154#[doc = "Load 8-bit data and sign-extend"]
14155#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"]
14156#[doc = "## Safety"]
14157#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14158#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14159#[inline(always)]
14160#[target_feature(enable = "sve")]
14161#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14162#[cfg_attr(test, assert_instr(ld1sb))]
14163pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
14164    svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
14165}
14166#[doc = "Load 8-bit data and sign-extend"]
14167#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"]
14168#[doc = "## Safety"]
14169#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14170#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14171#[inline(always)]
14172#[target_feature(enable = "sve")]
14173#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14174#[cfg_attr(test, assert_instr(ld1sb))]
14175pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
14176    svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
14177}
14178#[doc = "Load 16-bit data and sign-extend"]
14179#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"]
14180#[doc = "## Safety"]
14181#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14182#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14183#[inline(always)]
14184#[target_feature(enable = "sve")]
14185#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14186#[cfg_attr(test, assert_instr(ld1sh))]
14187pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
14188    svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
14189}
14190#[doc = "Load 8-bit data and sign-extend"]
14191#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"]
14192#[doc = "## Safety"]
14193#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14194#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14195#[inline(always)]
14196#[target_feature(enable = "sve")]
14197#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14198#[cfg_attr(test, assert_instr(ld1sb))]
14199pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
14200    svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
14201}
14202#[doc = "Load 16-bit data and sign-extend"]
14203#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"]
14204#[doc = "## Safety"]
14205#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14206#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14207#[inline(always)]
14208#[target_feature(enable = "sve")]
14209#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14210#[cfg_attr(test, assert_instr(ld1sh))]
14211pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
14212    svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
14213}
14214#[doc = "Load 32-bit data and sign-extend"]
14215#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"]
14216#[doc = "## Safety"]
14217#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14218#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14219#[inline(always)]
14220#[target_feature(enable = "sve")]
14221#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14222#[cfg_attr(test, assert_instr(ld1sw))]
14223pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
14224    svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
14225}
14226#[doc = "Load 8-bit data and sign-extend"]
14227#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"]
14228#[doc = "## Safety"]
14229#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14230#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14231#[inline(always)]
14232#[target_feature(enable = "sve")]
14233#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14234#[cfg_attr(test, assert_instr(ld1sb))]
14235pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
14236    svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
14237}
14238#[doc = "Load 8-bit data and sign-extend"]
14239#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"]
14240#[doc = "## Safety"]
14241#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14242#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14243#[inline(always)]
14244#[target_feature(enable = "sve")]
14245#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14246#[cfg_attr(test, assert_instr(ld1sb))]
14247pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
14248    svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
14249}
14250#[doc = "Load 16-bit data and sign-extend"]
14251#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"]
14252#[doc = "## Safety"]
14253#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14254#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14255#[inline(always)]
14256#[target_feature(enable = "sve")]
14257#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14258#[cfg_attr(test, assert_instr(ld1sh))]
14259pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
14260    svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
14261}
14262#[doc = "Load 8-bit data and sign-extend"]
14263#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"]
14264#[doc = "## Safety"]
14265#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14266#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14267#[inline(always)]
14268#[target_feature(enable = "sve")]
14269#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14270#[cfg_attr(test, assert_instr(ld1sb))]
14271pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
14272    svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
14273}
14274#[doc = "Load 16-bit data and sign-extend"]
14275#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"]
14276#[doc = "## Safety"]
14277#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14278#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14279#[inline(always)]
14280#[target_feature(enable = "sve")]
14281#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14282#[cfg_attr(test, assert_instr(ld1sh))]
14283pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
14284    svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
14285}
14286#[doc = "Load 32-bit data and sign-extend"]
14287#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"]
14288#[doc = "## Safety"]
14289#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
14290#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14291#[inline(always)]
14292#[target_feature(enable = "sve")]
14293#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14294#[cfg_attr(test, assert_instr(ld1sw))]
14295pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
14296    svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
14297}
14298#[doc = "Load 16-bit data and sign-extend"]
14299#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"]
14300#[doc = "## Safety"]
14301#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14302#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14303#[inline(always)]
14304#[target_feature(enable = "sve")]
14305#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14306#[cfg_attr(test, assert_instr(ld1sh))]
14307pub unsafe fn svld1sh_gather_s32index_s32(
14308    pg: svbool_t,
14309    base: *const i16,
14310    indices: svint32_t,
14311) -> svint32_t {
14312    unsafe extern "unadjusted" {
14313        #[cfg_attr(
14314            target_arch = "aarch64",
14315            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16"
14316        )]
14317        fn _svld1sh_gather_s32index_s32(
14318            pg: svbool4_t,
14319            base: *const i16,
14320            indices: svint32_t,
14321        ) -> nxv4i16;
14322    }
14323    crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32index_s32(pg.sve_into(), base, indices))
14324}
14325#[doc = "Load 16-bit data and sign-extend"]
14326#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"]
14327#[doc = "## Safety"]
14328#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14329#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14330#[inline(always)]
14331#[target_feature(enable = "sve")]
14332#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14333#[cfg_attr(test, assert_instr(ld1sh))]
14334pub unsafe fn svld1sh_gather_s32index_u32(
14335    pg: svbool_t,
14336    base: *const i16,
14337    indices: svint32_t,
14338) -> svuint32_t {
14339    svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned()
14340}
14341#[doc = "Load 16-bit data and sign-extend"]
14342#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"]
14343#[doc = "## Safety"]
14344#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14345#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14346#[inline(always)]
14347#[target_feature(enable = "sve")]
14348#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14349#[cfg_attr(test, assert_instr(ld1sh))]
14350pub unsafe fn svld1sh_gather_s64index_s64(
14351    pg: svbool_t,
14352    base: *const i16,
14353    indices: svint64_t,
14354) -> svint64_t {
14355    unsafe extern "unadjusted" {
14356        #[cfg_attr(
14357            target_arch = "aarch64",
14358            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16"
14359        )]
14360        fn _svld1sh_gather_s64index_s64(
14361            pg: svbool2_t,
14362            base: *const i16,
14363            indices: svint64_t,
14364        ) -> nxv2i16;
14365    }
14366    crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64index_s64(pg.sve_into(), base, indices))
14367}
14368#[doc = "Load 32-bit data and sign-extend"]
14369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"]
14370#[doc = "## Safety"]
14371#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14372#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14373#[inline(always)]
14374#[target_feature(enable = "sve")]
14375#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14376#[cfg_attr(test, assert_instr(ld1sw))]
14377pub unsafe fn svld1sw_gather_s64index_s64(
14378    pg: svbool_t,
14379    base: *const i32,
14380    indices: svint64_t,
14381) -> svint64_t {
14382    unsafe extern "unadjusted" {
14383        #[cfg_attr(
14384            target_arch = "aarch64",
14385            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32"
14386        )]
14387        fn _svld1sw_gather_s64index_s64(
14388            pg: svbool2_t,
14389            base: *const i32,
14390            indices: svint64_t,
14391        ) -> nxv2i32;
14392    }
14393    crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64index_s64(pg.sve_into(), base, indices))
14394}
14395#[doc = "Load 16-bit data and sign-extend"]
14396#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"]
14397#[doc = "## Safety"]
14398#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14399#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14400#[inline(always)]
14401#[target_feature(enable = "sve")]
14402#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14403#[cfg_attr(test, assert_instr(ld1sh))]
14404pub unsafe fn svld1sh_gather_s64index_u64(
14405    pg: svbool_t,
14406    base: *const i16,
14407    indices: svint64_t,
14408) -> svuint64_t {
14409    svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
14410}
14411#[doc = "Load 32-bit data and sign-extend"]
14412#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"]
14413#[doc = "## Safety"]
14414#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14415#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14416#[inline(always)]
14417#[target_feature(enable = "sve")]
14418#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14419#[cfg_attr(test, assert_instr(ld1sw))]
14420pub unsafe fn svld1sw_gather_s64index_u64(
14421    pg: svbool_t,
14422    base: *const i32,
14423    indices: svint64_t,
14424) -> svuint64_t {
14425    svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
14426}
14427#[doc = "Load 16-bit data and sign-extend"]
14428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"]
14429#[doc = "## Safety"]
14430#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14431#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14432#[inline(always)]
14433#[target_feature(enable = "sve")]
14434#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14435#[cfg_attr(test, assert_instr(ld1sh))]
14436pub unsafe fn svld1sh_gather_u32index_s32(
14437    pg: svbool_t,
14438    base: *const i16,
14439    indices: svuint32_t,
14440) -> svint32_t {
14441    unsafe extern "unadjusted" {
14442        #[cfg_attr(
14443            target_arch = "aarch64",
14444            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16"
14445        )]
14446        fn _svld1sh_gather_u32index_s32(
14447            pg: svbool4_t,
14448            base: *const i16,
14449            indices: svint32_t,
14450        ) -> nxv4i16;
14451    }
14452    crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32index_s32(
14453        pg.sve_into(),
14454        base,
14455        indices.as_signed(),
14456    ))
14457}
14458#[doc = "Load 16-bit data and sign-extend"]
14459#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"]
14460#[doc = "## Safety"]
14461#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14462#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14463#[inline(always)]
14464#[target_feature(enable = "sve")]
14465#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14466#[cfg_attr(test, assert_instr(ld1sh))]
14467pub unsafe fn svld1sh_gather_u32index_u32(
14468    pg: svbool_t,
14469    base: *const i16,
14470    indices: svuint32_t,
14471) -> svuint32_t {
14472    svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned()
14473}
14474#[doc = "Load 16-bit data and sign-extend"]
14475#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"]
14476#[doc = "## Safety"]
14477#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14478#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14479#[inline(always)]
14480#[target_feature(enable = "sve")]
14481#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14482#[cfg_attr(test, assert_instr(ld1sh))]
14483pub unsafe fn svld1sh_gather_u64index_s64(
14484    pg: svbool_t,
14485    base: *const i16,
14486    indices: svuint64_t,
14487) -> svint64_t {
14488    svld1sh_gather_s64index_s64(pg, base, indices.as_signed())
14489}
14490#[doc = "Load 32-bit data and sign-extend"]
14491#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"]
14492#[doc = "## Safety"]
14493#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
14494#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
14495#[inline(always)]
14496#[target_feature(enable = "sve")]
14497#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
14498#[cfg_attr(test, assert_instr(ld1sw))]
14499pub unsafe fn svld1sw_gather_u64index_s64(
14500    pg: svbool_t,
14501    base: *const i32,
14502    indices: svuint64_t,
14503) -> svint64_t {
14504    svld1sw_gather_s64index_s64(pg, base, indices.as_signed())
14505}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64index_u64(
    pg: svbool_t,
    base: *const i16,
    indices: svuint64_t,
) -> svuint64_t {
    // Delegate to the signed-index/signed-result implementation; the two
    // `as_*` calls are bit-preserving reinterpretations of the index lanes
    // (unsigned -> signed) and of the loaded result (signed -> unsigned).
    svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64index_u64(
    pg: svbool_t,
    base: *const i32,
    indices: svuint64_t,
) -> svuint64_t {
    // Delegate to the signed-index/signed-result implementation; the two
    // `as_*` calls are bit-preserving reinterpretations (index lanes to
    // signed, loaded result back to unsigned).
    svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    // Scale the element index to a byte offset (index << 1 == index * 2 for
    // 16-bit elements) and delegate to the byte-offset variant. The shift
    // amount (1) is always < 64, so `unchecked_shl` cannot hit its UB case here.
    svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svuint32_t {
    // Scale the element index to a byte offset (index * 2 for 16-bit elements)
    // and delegate to the byte-offset variant.
    svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Scale the element index to a byte offset (index * 2 for 16-bit elements)
    // and delegate to the byte-offset variant.
    svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Scale the element index to a byte offset (index << 2 == index * 4 for
    // 32-bit elements) and delegate to the byte-offset variant.
    svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 16-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sh))]
pub unsafe fn svld1sh_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index to a byte offset (index * 2 for 16-bit elements)
    // and delegate to the byte-offset variant.
    svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and sign-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1sw))]
pub unsafe fn svld1sw_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index to a byte offset (index * 4 for 32-bit elements)
    // and delegate to the byte-offset variant.
    svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_s32offset_s32(
    pg: svbool_t,
    base: *const u8,
    offsets: svint32_t,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s32offset_s32(
    pg: svbool_t,
    base: *const u16,
    offsets: svint32_t,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_s32offset_u32(
    pg: svbool_t,
    base: *const u8,
    offsets: svint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: loads one i8 lane per active element at
    // `base` plus the sign-extended ("sxtw") 32-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8"
        )]
        fn _svld1ub_gather_s32offset_u32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // Reinterpret the loaded i8 lanes as u8 (bit-preserving), then widen to
    // 32-bit lanes with `simd_cast`; unsigned-to-unsigned widening zero-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svld1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s32offset_u32(
    pg: svbool_t,
    base: *const u16,
    offsets: svint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: loads one i16 lane per active element at
    // `base` plus the sign-extended ("sxtw") 32-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16"
        )]
        fn _svld1uh_gather_s32offset_u32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // Reinterpret the loaded i16 lanes as u16 (bit-preserving), then widen to
    // 32-bit lanes with `simd_cast`; unsigned-to-unsigned widening zero-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u8,
    offsets: svint64_t,
) -> svint64_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u16,
    offsets: svint64_t,
) -> svint64_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u32,
    offsets: svint64_t,
) -> svint64_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u8,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM gather intrinsic: loads one i8 lane per active element at
    // `base` plus the 64-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8"
        )]
        fn _svld1ub_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // Reinterpret the loaded i8 lanes as u8 (bit-preserving), then widen to
    // 64-bit lanes with `simd_cast`; unsigned-to-unsigned widening zero-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svld1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u16,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM gather intrinsic: loads one i16 lane per active element at
    // `base` plus the 64-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
        )]
        fn _svld1uh_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // Reinterpret the loaded i16 lanes as u16 (bit-preserving), then widen to
    // 64-bit lanes with `simd_cast`; unsigned-to-unsigned widening zero-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svld1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u32,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM gather intrinsic: loads one i32 lane per active element at
    // `base` plus the 64-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32"
        )]
        fn _svld1uw_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // Reinterpret the loaded i32 lanes as u32 (bit-preserving), then widen to
    // 64-bit lanes with `simd_cast`; unsigned-to-unsigned widening zero-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svld1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u32offset_s32(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint32_t,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u32offset_s32(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint32_t,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: loads one i8 lane per active element at
    // `base` plus the zero-extended ("uxtw") 32-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8"
        )]
        fn _svld1ub_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // The intrinsic signature uses signed offset lanes, so reinterpret the
    // unsigned offsets (bit-preserving); then reinterpret the loaded i8 lanes
    // as u8 and widen with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svld1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: loads one i16 lane per active element at
    // `base` plus the zero-extended ("uxtw") 32-bit byte offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16"
        )]
        fn _svld1uh_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic signature uses signed offset lanes, so reinterpret the
    // unsigned offsets (bit-preserving); then reinterpret the loaded i16 lanes
    // as u16 and widen with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint64_t,
) -> svint64_t {
    // Delegate to the signed-offset/unsigned-result implementation; both
    // `as_*` calls are bit-preserving reinterpretations (offsets to signed,
    // zero-extended result to signed).
    svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint64_t,
) -> svint64_t {
    // Delegate to the signed-offset/unsigned-result implementation; both
    // `as_*` calls are bit-preserving reinterpretations.
    svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u32,
    offsets: svuint64_t,
) -> svint64_t {
    // Delegate to the signed-offset/unsigned-result implementation; both
    // `as_*` calls are bit-preserving reinterpretations.
    svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint64_t,
) -> svuint64_t {
    // Unsigned-offset variant: reinterpret the offset lanes as signed
    // (bit-for-bit) and delegate to the signed-offset implementation.
    svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint64_t,
) -> svuint64_t {
    // Unsigned-offset variant: reinterpret the offset lanes as signed
    // (bit-for-bit) and delegate to the signed-offset implementation.
    svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u32,
    offsets: svuint64_t,
) -> svuint64_t {
    // Unsigned-offset variant: reinterpret the offset lanes as signed
    // (bit-for-bit) and delegate to the signed-offset implementation.
    svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Zero-extension produces identical bits regardless of the result's
    // signedness, so delegate to the unsigned-result variant and reinterpret.
    svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: loads one i8 lane per active element at the
    // per-lane base address in `bases` plus the scalar byte `offset`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32"
        )]
        fn _svld1ub_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i8;
    }
    // Reinterpret `bases` to the signed lanes the intrinsic expects, then
    // reinterpret the loaded i8 lanes as u8 and widen with `simd_cast`,
    // which zero-extends unsigned lanes to 32 bits.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svld1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Binding to the raw LLVM gather intrinsic: loads one i16 per active lane
    // from `bases[lane] + offset` into an nxv4i16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32"
        )]
        fn _svld1uh_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 32-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(),
    )
}
15147#[doc = "Load 8-bit data and zero-extend"]
15148#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"]
15149#[doc = "## Safety"]
15150#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15151#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15152#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15153#[inline(always)]
15154#[target_feature(enable = "sve")]
15155#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15156#[cfg_attr(test, assert_instr(ld1b))]
15157pub unsafe fn svld1ub_gather_u64base_offset_s64(
15158    pg: svbool_t,
15159    bases: svuint64_t,
15160    offset: i64,
15161) -> svint64_t {
15162    svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
15163}
15164#[doc = "Load 16-bit data and zero-extend"]
15165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"]
15166#[doc = "## Safety"]
15167#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15168#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15169#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15170#[inline(always)]
15171#[target_feature(enable = "sve")]
15172#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15173#[cfg_attr(test, assert_instr(ld1h))]
15174pub unsafe fn svld1uh_gather_u64base_offset_s64(
15175    pg: svbool_t,
15176    bases: svuint64_t,
15177    offset: i64,
15178) -> svint64_t {
15179    svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
15180}
15181#[doc = "Load 32-bit data and zero-extend"]
15182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"]
15183#[doc = "## Safety"]
15184#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15185#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15186#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15187#[inline(always)]
15188#[target_feature(enable = "sve")]
15189#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15190#[cfg_attr(test, assert_instr(ld1w))]
15191pub unsafe fn svld1uw_gather_u64base_offset_s64(
15192    pg: svbool_t,
15193    bases: svuint64_t,
15194    offset: i64,
15195) -> svint64_t {
15196    svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
15197}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding to the raw LLVM gather intrinsic: loads one i8 per active lane
    // from `bases[lane] + offset` into an nxv2i8 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svld1ub_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i8;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svld1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding to the raw LLVM gather intrinsic: loads one i16 per active lane
    // from `bases[lane] + offset` into an nxv2i16 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svld1uh_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svld1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Binding to the raw LLVM gather intrinsic: loads one i32 per active lane
    // from `bases[lane] + offset` into an nxv2i32 vector.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svld1uw_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svld1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(),
    )
}
15288#[doc = "Load 8-bit data and zero-extend"]
15289#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"]
15290#[doc = "## Safety"]
15291#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15292#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15293#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15294#[inline(always)]
15295#[target_feature(enable = "sve")]
15296#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15297#[cfg_attr(test, assert_instr(ld1b))]
15298pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
15299    svld1ub_gather_u32base_offset_s32(pg, bases, 0)
15300}
15301#[doc = "Load 16-bit data and zero-extend"]
15302#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"]
15303#[doc = "## Safety"]
15304#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15305#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15306#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15307#[inline(always)]
15308#[target_feature(enable = "sve")]
15309#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15310#[cfg_attr(test, assert_instr(ld1h))]
15311pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
15312    svld1uh_gather_u32base_offset_s32(pg, bases, 0)
15313}
15314#[doc = "Load 8-bit data and zero-extend"]
15315#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"]
15316#[doc = "## Safety"]
15317#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15318#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15319#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15320#[inline(always)]
15321#[target_feature(enable = "sve")]
15322#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15323#[cfg_attr(test, assert_instr(ld1b))]
15324pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
15325    svld1ub_gather_u32base_offset_u32(pg, bases, 0)
15326}
15327#[doc = "Load 16-bit data and zero-extend"]
15328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"]
15329#[doc = "## Safety"]
15330#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15331#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15332#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15333#[inline(always)]
15334#[target_feature(enable = "sve")]
15335#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15336#[cfg_attr(test, assert_instr(ld1h))]
15337pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
15338    svld1uh_gather_u32base_offset_u32(pg, bases, 0)
15339}
15340#[doc = "Load 8-bit data and zero-extend"]
15341#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"]
15342#[doc = "## Safety"]
15343#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15344#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15345#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15346#[inline(always)]
15347#[target_feature(enable = "sve")]
15348#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15349#[cfg_attr(test, assert_instr(ld1b))]
15350pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
15351    svld1ub_gather_u64base_offset_s64(pg, bases, 0)
15352}
15353#[doc = "Load 16-bit data and zero-extend"]
15354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"]
15355#[doc = "## Safety"]
15356#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15357#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15358#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15359#[inline(always)]
15360#[target_feature(enable = "sve")]
15361#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15362#[cfg_attr(test, assert_instr(ld1h))]
15363pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
15364    svld1uh_gather_u64base_offset_s64(pg, bases, 0)
15365}
15366#[doc = "Load 32-bit data and zero-extend"]
15367#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"]
15368#[doc = "## Safety"]
15369#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15370#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15371#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15372#[inline(always)]
15373#[target_feature(enable = "sve")]
15374#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15375#[cfg_attr(test, assert_instr(ld1w))]
15376pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
15377    svld1uw_gather_u64base_offset_s64(pg, bases, 0)
15378}
15379#[doc = "Load 8-bit data and zero-extend"]
15380#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"]
15381#[doc = "## Safety"]
15382#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15383#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15384#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15385#[inline(always)]
15386#[target_feature(enable = "sve")]
15387#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15388#[cfg_attr(test, assert_instr(ld1b))]
15389pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
15390    svld1ub_gather_u64base_offset_u64(pg, bases, 0)
15391}
15392#[doc = "Load 16-bit data and zero-extend"]
15393#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"]
15394#[doc = "## Safety"]
15395#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15396#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15397#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15398#[inline(always)]
15399#[target_feature(enable = "sve")]
15400#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15401#[cfg_attr(test, assert_instr(ld1h))]
15402pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
15403    svld1uh_gather_u64base_offset_u64(pg, bases, 0)
15404}
15405#[doc = "Load 32-bit data and zero-extend"]
15406#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"]
15407#[doc = "## Safety"]
15408#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15409#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15410#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15411#[inline(always)]
15412#[target_feature(enable = "sve")]
15413#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15414#[cfg_attr(test, assert_instr(ld1w))]
15415pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
15416    svld1uw_gather_u64base_offset_u64(pg, bases, 0)
15417}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv8i8: one i8 per
    // 16-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")]
        fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 16-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv8u8, _>(
        _svld1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv4i8: one i8 per
    // 32-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")]
        fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 32-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svld1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv4i16: one i16 per
    // 32-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")]
        fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 32-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1b))]
pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv2i8: one i8 per
    // 64-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")]
        fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svld1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv2i16: one i16 per
    // 64-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")]
        fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svld1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
    // Binding to the raw LLVM predicated-load intrinsic (nxv2i32: one i32 per
    // 64-bit result lane).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")]
        fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned so that the widening
    // `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svld1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
15526#[doc = "Load 8-bit data and zero-extend"]
15527#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"]
15528#[doc = "## Safety"]
15529#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15530#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15531#[inline(always)]
15532#[target_feature(enable = "sve")]
15533#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15534#[cfg_attr(test, assert_instr(ld1b))]
15535pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
15536    svld1ub_s16(pg, base).as_unsigned()
15537}
15538#[doc = "Load 8-bit data and zero-extend"]
15539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"]
15540#[doc = "## Safety"]
15541#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15542#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15543#[inline(always)]
15544#[target_feature(enable = "sve")]
15545#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15546#[cfg_attr(test, assert_instr(ld1b))]
15547pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
15548    svld1ub_s32(pg, base).as_unsigned()
15549}
15550#[doc = "Load 16-bit data and zero-extend"]
15551#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"]
15552#[doc = "## Safety"]
15553#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15554#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15555#[inline(always)]
15556#[target_feature(enable = "sve")]
15557#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15558#[cfg_attr(test, assert_instr(ld1h))]
15559pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
15560    svld1uh_s32(pg, base).as_unsigned()
15561}
15562#[doc = "Load 8-bit data and zero-extend"]
15563#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"]
15564#[doc = "## Safety"]
15565#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15566#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15567#[inline(always)]
15568#[target_feature(enable = "sve")]
15569#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15570#[cfg_attr(test, assert_instr(ld1b))]
15571pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
15572    svld1ub_s64(pg, base).as_unsigned()
15573}
15574#[doc = "Load 16-bit data and zero-extend"]
15575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"]
15576#[doc = "## Safety"]
15577#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15578#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15579#[inline(always)]
15580#[target_feature(enable = "sve")]
15581#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15582#[cfg_attr(test, assert_instr(ld1h))]
15583pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
15584    svld1uh_s64(pg, base).as_unsigned()
15585}
15586#[doc = "Load 32-bit data and zero-extend"]
15587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"]
15588#[doc = "## Safety"]
15589#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15590#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15591#[inline(always)]
15592#[target_feature(enable = "sve")]
15593#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15594#[cfg_attr(test, assert_instr(ld1w))]
15595pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
15596    svld1uw_s64(pg, base).as_unsigned()
15597}
15598#[doc = "Load 8-bit data and zero-extend"]
15599#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"]
15600#[doc = "## Safety"]
15601#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15602#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15603#[inline(always)]
15604#[target_feature(enable = "sve")]
15605#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15606#[cfg_attr(test, assert_instr(ld1b))]
15607pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
15608    svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
15609}
15610#[doc = "Load 8-bit data and zero-extend"]
15611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"]
15612#[doc = "## Safety"]
15613#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15614#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15615#[inline(always)]
15616#[target_feature(enable = "sve")]
15617#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15618#[cfg_attr(test, assert_instr(ld1b))]
15619pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
15620    svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
15621}
15622#[doc = "Load 16-bit data and zero-extend"]
15623#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"]
15624#[doc = "## Safety"]
15625#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15626#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15627#[inline(always)]
15628#[target_feature(enable = "sve")]
15629#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15630#[cfg_attr(test, assert_instr(ld1h))]
15631pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
15632    svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
15633}
15634#[doc = "Load 8-bit data and zero-extend"]
15635#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"]
15636#[doc = "## Safety"]
15637#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15638#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15639#[inline(always)]
15640#[target_feature(enable = "sve")]
15641#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15642#[cfg_attr(test, assert_instr(ld1b))]
15643pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
15644    svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
15645}
15646#[doc = "Load 16-bit data and zero-extend"]
15647#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"]
15648#[doc = "## Safety"]
15649#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15650#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15651#[inline(always)]
15652#[target_feature(enable = "sve")]
15653#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15654#[cfg_attr(test, assert_instr(ld1h))]
15655pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
15656    svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
15657}
15658#[doc = "Load 32-bit data and zero-extend"]
15659#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"]
15660#[doc = "## Safety"]
15661#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15662#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15663#[inline(always)]
15664#[target_feature(enable = "sve")]
15665#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15666#[cfg_attr(test, assert_instr(ld1w))]
15667pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
15668    svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
15669}
15670#[doc = "Load 8-bit data and zero-extend"]
15671#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"]
15672#[doc = "## Safety"]
15673#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15674#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15675#[inline(always)]
15676#[target_feature(enable = "sve")]
15677#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15678#[cfg_attr(test, assert_instr(ld1b))]
15679pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
15680    svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
15681}
15682#[doc = "Load 8-bit data and zero-extend"]
15683#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"]
15684#[doc = "## Safety"]
15685#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15686#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15687#[inline(always)]
15688#[target_feature(enable = "sve")]
15689#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15690#[cfg_attr(test, assert_instr(ld1b))]
15691pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
15692    svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
15693}
15694#[doc = "Load 16-bit data and zero-extend"]
15695#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"]
15696#[doc = "## Safety"]
15697#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15698#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15699#[inline(always)]
15700#[target_feature(enable = "sve")]
15701#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15702#[cfg_attr(test, assert_instr(ld1h))]
15703pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
15704    svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
15705}
15706#[doc = "Load 8-bit data and zero-extend"]
15707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"]
15708#[doc = "## Safety"]
15709#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15710#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15711#[inline(always)]
15712#[target_feature(enable = "sve")]
15713#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15714#[cfg_attr(test, assert_instr(ld1b))]
15715pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
15716    svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
15717}
15718#[doc = "Load 16-bit data and zero-extend"]
15719#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"]
15720#[doc = "## Safety"]
15721#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15722#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15723#[inline(always)]
15724#[target_feature(enable = "sve")]
15725#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15726#[cfg_attr(test, assert_instr(ld1h))]
15727pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
15728    svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
15729}
15730#[doc = "Load 32-bit data and zero-extend"]
15731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"]
15732#[doc = "## Safety"]
15733#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
15734#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15735#[inline(always)]
15736#[target_feature(enable = "sve")]
15737#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15738#[cfg_attr(test, assert_instr(ld1w))]
15739pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
15740    svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
15741}
15742#[doc = "Load 16-bit data and zero-extend"]
15743#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"]
15744#[doc = "## Safety"]
15745#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15746#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15747#[inline(always)]
15748#[target_feature(enable = "sve")]
15749#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15750#[cfg_attr(test, assert_instr(ld1h))]
15751pub unsafe fn svld1uh_gather_s32index_s32(
15752    pg: svbool_t,
15753    base: *const u16,
15754    indices: svint32_t,
15755) -> svint32_t {
15756    svld1uh_gather_s32index_u32(pg, base, indices).as_signed()
15757}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: `sxtw` sign-extends the 32-bit indices and
    // `index` scales them by the element size; the load produces 16-bit lanes
    // (nxv4i16) that still need widening to the 32-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16")]
        fn _svld1uh_gather_s32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned (nxv4u16) before the cast so
    // simd_cast widens by zero-extension, matching the documented behavior.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
15787#[doc = "Load 16-bit data and zero-extend"]
15788#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"]
15789#[doc = "## Safety"]
15790#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15791#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15792#[inline(always)]
15793#[target_feature(enable = "sve")]
15794#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15795#[cfg_attr(test, assert_instr(ld1h))]
15796pub unsafe fn svld1uh_gather_s64index_s64(
15797    pg: svbool_t,
15798    base: *const u16,
15799    indices: svint64_t,
15800) -> svint64_t {
15801    svld1uh_gather_s64index_u64(pg, base, indices).as_signed()
15802}
15803#[doc = "Load 32-bit data and zero-extend"]
15804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"]
15805#[doc = "## Safety"]
15806#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15807#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15808#[inline(always)]
15809#[target_feature(enable = "sve")]
15810#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15811#[cfg_attr(test, assert_instr(ld1w))]
15812pub unsafe fn svld1uw_gather_s64index_s64(
15813    pg: svbool_t,
15814    base: *const u32,
15815    indices: svint64_t,
15816) -> svint64_t {
15817    svld1uw_gather_s64index_u64(pg, base, indices).as_signed()
15818}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_s64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svuint64_t {
    // Raw LLVM gather intrinsic: 64-bit indices, scaled by the element size
    // (`index` form); the load produces 16-bit lanes (nxv2i16) that still
    // need widening to the 64-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16"
        )]
        fn _svld1uh_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned (nxv2u16) before the cast so
    // simd_cast widens by zero-extension, matching the documented behavior.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svld1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1w))]
pub unsafe fn svld1uw_gather_s64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svuint64_t {
    // Raw LLVM gather intrinsic: 64-bit indices, scaled by the element size
    // (`index` form); the load produces 32-bit lanes (nxv2i32) that still
    // need widening to the 64-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32"
        )]
        fn _svld1uw_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned (nxv2u32) before the cast so
    // simd_cast widens by zero-extension, matching the documented behavior.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svld1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
15877#[doc = "Load 16-bit data and zero-extend"]
15878#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"]
15879#[doc = "## Safety"]
15880#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15881#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15882#[inline(always)]
15883#[target_feature(enable = "sve")]
15884#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15885#[cfg_attr(test, assert_instr(ld1h))]
15886pub unsafe fn svld1uh_gather_u32index_s32(
15887    pg: svbool_t,
15888    base: *const u16,
15889    indices: svuint32_t,
15890) -> svint32_t {
15891    svld1uh_gather_u32index_u32(pg, base, indices).as_signed()
15892}
#[doc = "Load 16-bit data and zero-extend"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld1h))]
pub unsafe fn svld1uh_gather_u32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svuint32_t,
) -> svuint32_t {
    // Raw LLVM gather intrinsic: `uxtw` zero-extends the 32-bit indices and
    // `index` scales them by the element size; the load produces 16-bit
    // lanes (nxv4i16) that still need widening to the 32-bit result.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16"
        )]
        fn _svld1uh_gather_u32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned (nxv4u16) before the cast so
    // simd_cast widens by zero-extension, matching the documented behavior.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svld1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed())
            .as_unsigned(),
    )
}
15923#[doc = "Load 16-bit data and zero-extend"]
15924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"]
15925#[doc = "## Safety"]
15926#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15927#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15928#[inline(always)]
15929#[target_feature(enable = "sve")]
15930#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15931#[cfg_attr(test, assert_instr(ld1h))]
15932pub unsafe fn svld1uh_gather_u64index_s64(
15933    pg: svbool_t,
15934    base: *const u16,
15935    indices: svuint64_t,
15936) -> svint64_t {
15937    svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
15938}
15939#[doc = "Load 32-bit data and zero-extend"]
15940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"]
15941#[doc = "## Safety"]
15942#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15943#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15944#[inline(always)]
15945#[target_feature(enable = "sve")]
15946#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15947#[cfg_attr(test, assert_instr(ld1w))]
15948pub unsafe fn svld1uw_gather_u64index_s64(
15949    pg: svbool_t,
15950    base: *const u32,
15951    indices: svuint64_t,
15952) -> svint64_t {
15953    svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
15954}
15955#[doc = "Load 16-bit data and zero-extend"]
15956#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"]
15957#[doc = "## Safety"]
15958#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15959#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15960#[inline(always)]
15961#[target_feature(enable = "sve")]
15962#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15963#[cfg_attr(test, assert_instr(ld1h))]
15964pub unsafe fn svld1uh_gather_u64index_u64(
15965    pg: svbool_t,
15966    base: *const u16,
15967    indices: svuint64_t,
15968) -> svuint64_t {
15969    svld1uh_gather_s64index_u64(pg, base, indices.as_signed())
15970}
15971#[doc = "Load 32-bit data and zero-extend"]
15972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"]
15973#[doc = "## Safety"]
15974#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15975#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15976#[inline(always)]
15977#[target_feature(enable = "sve")]
15978#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15979#[cfg_attr(test, assert_instr(ld1w))]
15980pub unsafe fn svld1uw_gather_u64index_u64(
15981    pg: svbool_t,
15982    base: *const u32,
15983    indices: svuint64_t,
15984) -> svuint64_t {
15985    svld1uw_gather_s64index_u64(pg, base, indices.as_signed())
15986}
15987#[doc = "Load 16-bit data and zero-extend"]
15988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"]
15989#[doc = "## Safety"]
15990#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
15991#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
15992#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
15993#[inline(always)]
15994#[target_feature(enable = "sve")]
15995#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
15996#[cfg_attr(test, assert_instr(ld1h))]
15997pub unsafe fn svld1uh_gather_u32base_index_s32(
15998    pg: svbool_t,
15999    bases: svuint32_t,
16000    index: i64,
16001) -> svint32_t {
16002    svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
16003}
16004#[doc = "Load 16-bit data and zero-extend"]
16005#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"]
16006#[doc = "## Safety"]
16007#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16008#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16009#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
16010#[inline(always)]
16011#[target_feature(enable = "sve")]
16012#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16013#[cfg_attr(test, assert_instr(ld1h))]
16014pub unsafe fn svld1uh_gather_u32base_index_u32(
16015    pg: svbool_t,
16016    bases: svuint32_t,
16017    index: i64,
16018) -> svuint32_t {
16019    svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
16020}
16021#[doc = "Load 16-bit data and zero-extend"]
16022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"]
16023#[doc = "## Safety"]
16024#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16025#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16026#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
16027#[inline(always)]
16028#[target_feature(enable = "sve")]
16029#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16030#[cfg_attr(test, assert_instr(ld1h))]
16031pub unsafe fn svld1uh_gather_u64base_index_s64(
16032    pg: svbool_t,
16033    bases: svuint64_t,
16034    index: i64,
16035) -> svint64_t {
16036    svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
16037}
16038#[doc = "Load 32-bit data and zero-extend"]
16039#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"]
16040#[doc = "## Safety"]
16041#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16042#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16043#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
16044#[inline(always)]
16045#[target_feature(enable = "sve")]
16046#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16047#[cfg_attr(test, assert_instr(ld1w))]
16048pub unsafe fn svld1uw_gather_u64base_index_s64(
16049    pg: svbool_t,
16050    bases: svuint64_t,
16051    index: i64,
16052) -> svint64_t {
16053    svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
16054}
16055#[doc = "Load 16-bit data and zero-extend"]
16056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"]
16057#[doc = "## Safety"]
16058#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16059#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16060#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
16061#[inline(always)]
16062#[target_feature(enable = "sve")]
16063#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16064#[cfg_attr(test, assert_instr(ld1h))]
16065pub unsafe fn svld1uh_gather_u64base_index_u64(
16066    pg: svbool_t,
16067    bases: svuint64_t,
16068    index: i64,
16069) -> svuint64_t {
16070    svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
16071}
16072#[doc = "Load 32-bit data and zero-extend"]
16073#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"]
16074#[doc = "## Safety"]
16075#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16076#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16077#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
16078#[inline(always)]
16079#[target_feature(enable = "sve")]
16080#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16081#[cfg_attr(test, assert_instr(ld1w))]
16082pub unsafe fn svld1uw_gather_u64base_index_u64(
16083    pg: svbool_t,
16084    bases: svuint64_t,
16085    index: i64,
16086) -> svuint64_t {
16087    svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
16088}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2w))]
pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t {
    // Raw LLVM structured-load intrinsic (`ld2.sret`): loads consecutive
    // two-element tuples and returns them split across a pair of vectors.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld2.sret.nxv4f32"
        )]
        fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t;
    }
    // Only the predicate needs converting (svbool_t -> svbool4_t); the f32
    // element type passes straight through with no sign reinterpretation.
    _svld2_f32(pg.sve_into(), base)
}
16108#[doc = "Load two-element tuples into two vectors"]
16109#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"]
16110#[doc = "## Safety"]
16111#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16112#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16113#[inline(always)]
16114#[target_feature(enable = "sve")]
16115#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16116#[cfg_attr(test, assert_instr(ld2d))]
16117pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t {
16118    unsafe extern "unadjusted" {
16119        #[cfg_attr(
16120            target_arch = "aarch64",
16121            link_name = "llvm.aarch64.sve.ld2.sret.nxv2f64"
16122        )]
16123        fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t;
16124    }
16125    _svld2_f64(pg.sve_into(), base)
16126}
16127#[doc = "Load two-element tuples into two vectors"]
16128#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"]
16129#[doc = "## Safety"]
16130#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16131#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16132#[inline(always)]
16133#[target_feature(enable = "sve")]
16134#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16135#[cfg_attr(test, assert_instr(ld2b))]
16136pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t {
16137    unsafe extern "unadjusted" {
16138        #[cfg_attr(
16139            target_arch = "aarch64",
16140            link_name = "llvm.aarch64.sve.ld2.sret.nxv16i8"
16141        )]
16142        fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t;
16143    }
16144    _svld2_s8(pg, base)
16145}
16146#[doc = "Load two-element tuples into two vectors"]
16147#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"]
16148#[doc = "## Safety"]
16149#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16150#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16151#[inline(always)]
16152#[target_feature(enable = "sve")]
16153#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16154#[cfg_attr(test, assert_instr(ld2h))]
16155pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t {
16156    unsafe extern "unadjusted" {
16157        #[cfg_attr(
16158            target_arch = "aarch64",
16159            link_name = "llvm.aarch64.sve.ld2.sret.nxv8i16"
16160        )]
16161        fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t;
16162    }
16163    _svld2_s16(pg.sve_into(), base)
16164}
16165#[doc = "Load two-element tuples into two vectors"]
16166#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"]
16167#[doc = "## Safety"]
16168#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16169#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16170#[inline(always)]
16171#[target_feature(enable = "sve")]
16172#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16173#[cfg_attr(test, assert_instr(ld2w))]
16174pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t {
16175    unsafe extern "unadjusted" {
16176        #[cfg_attr(
16177            target_arch = "aarch64",
16178            link_name = "llvm.aarch64.sve.ld2.sret.nxv4i32"
16179        )]
16180        fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t;
16181    }
16182    _svld2_s32(pg.sve_into(), base)
16183}
16184#[doc = "Load two-element tuples into two vectors"]
16185#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"]
16186#[doc = "## Safety"]
16187#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16188#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16189#[inline(always)]
16190#[target_feature(enable = "sve")]
16191#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16192#[cfg_attr(test, assert_instr(ld2d))]
16193pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t {
16194    unsafe extern "unadjusted" {
16195        #[cfg_attr(
16196            target_arch = "aarch64",
16197            link_name = "llvm.aarch64.sve.ld2.sret.nxv2i64"
16198        )]
16199        fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t;
16200    }
16201    _svld2_s64(pg.sve_into(), base)
16202}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2b))]
pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t {
    // Unsigned view of the signed load: cast the pointer with `as_signed`
    // and cast the loaded tuple back with `as_unsigned`.
    svld2_s8(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2h))]
pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t {
    // Delegates to the i16 load, casting pointer and result between
    // signed and unsigned lane types.
    svld2_s16(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2w))]
pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t {
    // Delegates to the i32 load, casting pointer and result between
    // signed and unsigned lane types.
    svld2_s32(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2d))]
pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t {
    // Delegates to the i64 load, casting pointer and result between
    // signed and unsigned lane types.
    svld2_s64(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2w))]
pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t {
    // `vnum` counts whole vectors: advance `base` by svcntw() (elements
    // per 32-bit vector) * vnum elements, then delegate to the plain load.
    svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2d))]
pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t {
    // Offset by svcntd() (64-bit elements per vector) * vnum elements.
    svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2b))]
pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t {
    // Offset by svcntb() (8-bit elements per vector) * vnum elements.
    svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2h))]
pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t {
    // Offset by svcnth() (16-bit elements per vector) * vnum elements.
    svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2w))]
pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t {
    // Offset by svcntw() (32-bit elements per vector) * vnum elements.
    svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2d))]
pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t {
    // Offset by svcntd() (64-bit elements per vector) * vnum elements.
    svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2b))]
pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t {
    // Offset by svcntb() (8-bit elements per vector) * vnum elements.
    svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2h))]
pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t {
    // Offset by svcnth() (16-bit elements per vector) * vnum elements.
    svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2w))]
pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t {
    // Offset by svcntw() (32-bit elements per vector) * vnum elements.
    svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load two-element tuples into two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld2d))]
pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x2_t {
    // Offset by svcntd() (64-bit elements per vector) * vnum elements.
    svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t {
    // LLVM structured-load (LD3) intrinsic for scalable vectors of 4 x f32;
    // `sret` means the three-vector tuple is returned by value.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv4f32"
        )]
        fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t;
    }
    // Predicate converted to the `svbool4_t` form matching nxv4f32.
    _svld3_f32(pg.sve_into(), base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3d))]
pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t {
    // LLVM LD3 intrinsic for scalable vectors of 2 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv2f64"
        )]
        fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t;
    }
    // Predicate converted to the `svbool2_t` form matching nxv2f64.
    _svld3_f64(pg.sve_into(), base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3b))]
pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t {
    // LLVM LD3 intrinsic for scalable vectors of 16 x i8. The byte-element
    // form takes the full `svbool_t` predicate unchanged (no conversion).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv16i8"
        )]
        fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t;
    }
    _svld3_s8(pg, base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3h))]
pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t {
    // LLVM LD3 intrinsic for scalable vectors of 8 x i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv8i16"
        )]
        fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t;
    }
    // Predicate converted to the `svbool8_t` form matching nxv8i16.
    _svld3_s16(pg.sve_into(), base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t {
    // LLVM LD3 intrinsic for scalable vectors of 4 x i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv4i32"
        )]
        fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t;
    }
    // Predicate converted to the `svbool4_t` form matching nxv4i32.
    _svld3_s32(pg.sve_into(), base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3d))]
pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t {
    // LLVM LD3 intrinsic for scalable vectors of 2 x i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld3.sret.nxv2i64"
        )]
        fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t;
    }
    // Predicate converted to the `svbool2_t` form matching nxv2i64.
    _svld3_s64(pg.sve_into(), base)
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3b))]
pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t {
    // Unsigned view of the signed load: cast the pointer with `as_signed`
    // and cast the loaded tuple back with `as_unsigned`.
    svld3_s8(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3h))]
pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t {
    // Delegates to the i16 load, casting pointer and result between
    // signed and unsigned lane types.
    svld3_s16(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t {
    // Delegates to the i32 load, casting pointer and result between
    // signed and unsigned lane types.
    svld3_s32(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3d))]
pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t {
    // Delegates to the i64 load, casting pointer and result between
    // signed and unsigned lane types.
    svld3_s64(pg, base.as_signed()).as_unsigned()
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t {
    // `vnum` counts whole vectors: advance `base` by svcntw() (elements
    // per 32-bit vector) * vnum elements, then delegate to the plain load.
    svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3d))]
pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t {
    // Offset by svcntd() (64-bit elements per vector) * vnum elements.
    svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3b))]
pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t {
    // Offset by svcntb() (8-bit elements per vector) * vnum elements.
    svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3h))]
pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t {
    // Offset by svcnth() (16-bit elements per vector) * vnum elements.
    svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t {
    // Offset by svcntw() (32-bit elements per vector) * vnum elements.
    svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3d))]
pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t {
    // Offset by svcntd() (64-bit elements per vector) * vnum elements.
    svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3b))]
pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t {
    // Offset by svcntb() (8-bit elements per vector) * vnum elements.
    svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3h))]
pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t {
    // Offset by svcnth() (16-bit elements per vector) * vnum elements.
    svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load three-element tuples into three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld3w))]
pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t {
    // Offset by svcntw() (32-bit elements per vector) * vnum elements.
    svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
16641#[doc = "Load three-element tuples into three vectors"]
16642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"]
16643#[doc = "## Safety"]
16644#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16645#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16646#[inline(always)]
16647#[target_feature(enable = "sve")]
16648#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16649#[cfg_attr(test, assert_instr(ld3d))]
16650pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t {
16651    svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize))
16652}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4w))]
pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv4f32"
        )]
        fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t;
    }
    // Convert the predicate to the 32-bit-element predicate type
    // (`svbool4_t`) required by the intrinsic's signature.
    _svld4_f32(pg.sve_into(), base)
}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4d))]
pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv2f64"
        )]
        fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t;
    }
    // Convert the predicate to the 64-bit-element predicate type
    // (`svbool2_t`) required by the intrinsic's signature.
    _svld4_f64(pg.sve_into(), base)
}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4b))]
pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv16i8"
        )]
        fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t;
    }
    // The 8-bit-element intrinsic takes `svbool_t` directly, so no
    // predicate conversion is needed here.
    _svld4_s8(pg, base)
}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4h))]
pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv8i16"
        )]
        fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t;
    }
    // Convert the predicate to the 16-bit-element predicate type
    // (`svbool8_t`) required by the intrinsic's signature.
    _svld4_s16(pg.sve_into(), base)
}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4w))]
pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv4i32"
        )]
        fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t;
    }
    // Convert the predicate to the 32-bit-element predicate type
    // (`svbool4_t`) required by the intrinsic's signature.
    _svld4_s32(pg.sve_into(), base)
}
#[doc = "Load four-element tuples into four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ld4d))]
pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t {
    // Direct binding to the LLVM SVE structured-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ld4.sret.nxv2i64"
        )]
        fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t;
    }
    // Convert the predicate to the 64-bit-element predicate type
    // (`svbool2_t`) required by the intrinsic's signature.
    _svld4_s64(pg.sve_into(), base)
}
16767#[doc = "Load four-element tuples into four vectors"]
16768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"]
16769#[doc = "## Safety"]
16770#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16771#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16772#[inline(always)]
16773#[target_feature(enable = "sve")]
16774#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16775#[cfg_attr(test, assert_instr(ld4b))]
16776pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t {
16777    svld4_s8(pg, base.as_signed()).as_unsigned()
16778}
16779#[doc = "Load four-element tuples into four vectors"]
16780#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"]
16781#[doc = "## Safety"]
16782#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16783#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16784#[inline(always)]
16785#[target_feature(enable = "sve")]
16786#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16787#[cfg_attr(test, assert_instr(ld4h))]
16788pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t {
16789    svld4_s16(pg, base.as_signed()).as_unsigned()
16790}
16791#[doc = "Load four-element tuples into four vectors"]
16792#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"]
16793#[doc = "## Safety"]
16794#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16795#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16796#[inline(always)]
16797#[target_feature(enable = "sve")]
16798#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16799#[cfg_attr(test, assert_instr(ld4w))]
16800pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t {
16801    svld4_s32(pg, base.as_signed()).as_unsigned()
16802}
16803#[doc = "Load four-element tuples into four vectors"]
16804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"]
16805#[doc = "## Safety"]
16806#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
16807#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16808#[inline(always)]
16809#[target_feature(enable = "sve")]
16810#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16811#[cfg_attr(test, assert_instr(ld4d))]
16812pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t {
16813    svld4_s64(pg, base.as_signed()).as_unsigned()
16814}
16815#[doc = "Load four-element tuples into four vectors"]
16816#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"]
16817#[doc = "## Safety"]
16818#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16819#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16820#[inline(always)]
16821#[target_feature(enable = "sve")]
16822#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16823#[cfg_attr(test, assert_instr(ld4w))]
16824pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t {
16825    svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize))
16826}
16827#[doc = "Load four-element tuples into four vectors"]
16828#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"]
16829#[doc = "## Safety"]
16830#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16831#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16832#[inline(always)]
16833#[target_feature(enable = "sve")]
16834#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16835#[cfg_attr(test, assert_instr(ld4d))]
16836pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t {
16837    svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize))
16838}
16839#[doc = "Load four-element tuples into four vectors"]
16840#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"]
16841#[doc = "## Safety"]
16842#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16843#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16844#[inline(always)]
16845#[target_feature(enable = "sve")]
16846#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16847#[cfg_attr(test, assert_instr(ld4b))]
16848pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t {
16849    svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize))
16850}
16851#[doc = "Load four-element tuples into four vectors"]
16852#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"]
16853#[doc = "## Safety"]
16854#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16855#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16856#[inline(always)]
16857#[target_feature(enable = "sve")]
16858#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16859#[cfg_attr(test, assert_instr(ld4h))]
16860pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t {
16861    svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize))
16862}
16863#[doc = "Load four-element tuples into four vectors"]
16864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"]
16865#[doc = "## Safety"]
16866#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16867#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16868#[inline(always)]
16869#[target_feature(enable = "sve")]
16870#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16871#[cfg_attr(test, assert_instr(ld4w))]
16872pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t {
16873    svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize))
16874}
16875#[doc = "Load four-element tuples into four vectors"]
16876#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"]
16877#[doc = "## Safety"]
16878#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16879#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16880#[inline(always)]
16881#[target_feature(enable = "sve")]
16882#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16883#[cfg_attr(test, assert_instr(ld4d))]
16884pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t {
16885    svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize))
16886}
16887#[doc = "Load four-element tuples into four vectors"]
16888#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"]
16889#[doc = "## Safety"]
16890#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16891#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16892#[inline(always)]
16893#[target_feature(enable = "sve")]
16894#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16895#[cfg_attr(test, assert_instr(ld4b))]
16896pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t {
16897    svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize))
16898}
16899#[doc = "Load four-element tuples into four vectors"]
16900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"]
16901#[doc = "## Safety"]
16902#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16903#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16904#[inline(always)]
16905#[target_feature(enable = "sve")]
16906#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16907#[cfg_attr(test, assert_instr(ld4h))]
16908pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t {
16909    svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize))
16910}
16911#[doc = "Load four-element tuples into four vectors"]
16912#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"]
16913#[doc = "## Safety"]
16914#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16915#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16916#[inline(always)]
16917#[target_feature(enable = "sve")]
16918#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16919#[cfg_attr(test, assert_instr(ld4w))]
16920pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t {
16921    svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize))
16922}
16923#[doc = "Load four-element tuples into four vectors"]
16924#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"]
16925#[doc = "## Safety"]
16926#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
16927#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
16928#[inline(always)]
16929#[target_feature(enable = "sve")]
16930#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
16931#[cfg_attr(test, assert_instr(ld4d))]
16932pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t {
16933    svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize))
16934}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")]
        fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
    }
    // Convert the predicate to the 32-bit-element predicate type
    // (`svbool4_t`) required by the intrinsic's signature.
    _svldff1_f32(pg.sve_into(), base)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")]
        fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
    }
    // Convert the predicate to the 64-bit-element predicate type
    // (`svbool2_t`) required by the intrinsic's signature.
    _svldff1_f64(pg.sve_into(), base)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")]
        fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
    }
    // The 8-bit-element intrinsic takes `svbool_t` directly, so no
    // predicate conversion is needed here.
    _svldff1_s8(pg, base)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")]
        fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
    }
    // Convert the predicate to the 16-bit-element predicate type
    // (`svbool8_t`) required by the intrinsic's signature.
    _svldff1_s16(pg.sve_into(), base)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")]
        fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
    }
    // Convert the predicate to the 32-bit-element predicate type
    // (`svbool4_t`) required by the intrinsic's signature.
    _svldff1_s32(pg.sve_into(), base)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
    // Direct binding to the LLVM SVE first-faulting-load intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")]
        fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
    }
    // Convert the predicate to the 64-bit-element predicate type
    // (`svbool2_t`) required by the intrinsic's signature.
    _svldff1_s64(pg.sve_into(), base)
}
17037#[doc = "Unextended load, first-faulting"]
17038#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"]
17039#[doc = "## Safety"]
17040#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17041#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17042#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17043#[inline(always)]
17044#[target_feature(enable = "sve")]
17045#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17046#[cfg_attr(test, assert_instr(ldff1b))]
17047pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
17048    svldff1_s8(pg, base.as_signed()).as_unsigned()
17049}
17050#[doc = "Unextended load, first-faulting"]
17051#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"]
17052#[doc = "## Safety"]
17053#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17054#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17055#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17056#[inline(always)]
17057#[target_feature(enable = "sve")]
17058#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17059#[cfg_attr(test, assert_instr(ldff1h))]
17060pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
17061    svldff1_s16(pg, base.as_signed()).as_unsigned()
17062}
17063#[doc = "Unextended load, first-faulting"]
17064#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"]
17065#[doc = "## Safety"]
17066#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17067#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17068#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17069#[inline(always)]
17070#[target_feature(enable = "sve")]
17071#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17072#[cfg_attr(test, assert_instr(ldff1w))]
17073pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
17074    svldff1_s32(pg, base.as_signed()).as_unsigned()
17075}
17076#[doc = "Unextended load, first-faulting"]
17077#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"]
17078#[doc = "## Safety"]
17079#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17080#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17081#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17082#[inline(always)]
17083#[target_feature(enable = "sve")]
17084#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17085#[cfg_attr(test, assert_instr(ldff1d))]
17086pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
17087    svldff1_s64(pg, base.as_signed()).as_unsigned()
17088}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_s32index_f32(
    pg: svbool_t,
    base: *const f32,
    indices: svint32_t,
) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; the symbol encodes the addressing
    // mode (`sxtw.index` — the signed-32-bit, element-scaled index variant) and
    // element type (`nxv4f32`).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32"
        )]
        fn _svldff1_gather_s32index_f32(
            pg: svbool4_t,
            base: *const f32,
            indices: svint32_t,
        ) -> svfloat32_t;
    }
    // `sve_into` converts the generic `svbool_t` predicate into the `svbool4_t`
    // form this intrinsic's signature expects.
    _svldff1_gather_s32index_f32(pg.sve_into(), base, indices)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_s32index_s32(
    pg: svbool_t,
    base: *const i32,
    indices: svint32_t,
) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; `sxtw.index` is the
    // signed-32-bit, element-scaled index variant, `nxv4i32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32"
        )]
        fn _svldff1_gather_s32index_s32(
            pg: svbool4_t,
            base: *const i32,
            indices: svint32_t,
        ) -> svint32_t;
    }
    // Convert the generic predicate into the `svbool4_t` form the intrinsic expects.
    _svldff1_gather_s32index_s32(pg.sve_into(), base, indices)
}
17145#[doc = "Unextended load, first-faulting"]
17146#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"]
17147#[doc = "## Safety"]
17148#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17149#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17150#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17151#[inline(always)]
17152#[target_feature(enable = "sve")]
17153#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17154#[cfg_attr(test, assert_instr(ldff1w))]
17155pub unsafe fn svldff1_gather_s32index_u32(
17156    pg: svbool_t,
17157    base: *const u32,
17158    indices: svint32_t,
17159) -> svuint32_t {
17160    svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned()
17161}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_s64index_f64(
    pg: svbool_t,
    base: *const f64,
    indices: svint64_t,
) -> svfloat64_t {
    // Direct binding to the LLVM SVE intrinsic; `.index` is the element-scaled
    // 64-bit index variant (no extension needed), `nxv2f64` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64"
        )]
        fn _svldff1_gather_s64index_f64(
            pg: svbool2_t,
            base: *const f64,
            indices: svint64_t,
        ) -> svfloat64_t;
    }
    // Convert the generic predicate into the `svbool2_t` form the intrinsic expects.
    _svldff1_gather_s64index_f64(pg.sve_into(), base, indices)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_s64index_s64(
    pg: svbool_t,
    base: *const i64,
    indices: svint64_t,
) -> svint64_t {
    // Direct binding to the LLVM SVE intrinsic; `.index` is the element-scaled
    // 64-bit index variant, `nxv2i64` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64"
        )]
        fn _svldff1_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i64,
            indices: svint64_t,
        ) -> svint64_t;
    }
    // Convert the generic predicate into the `svbool2_t` form the intrinsic expects.
    _svldff1_gather_s64index_s64(pg.sve_into(), base, indices)
}
17218#[doc = "Unextended load, first-faulting"]
17219#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"]
17220#[doc = "## Safety"]
17221#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17222#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17223#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17224#[inline(always)]
17225#[target_feature(enable = "sve")]
17226#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17227#[cfg_attr(test, assert_instr(ldff1d))]
17228pub unsafe fn svldff1_gather_s64index_u64(
17229    pg: svbool_t,
17230    base: *const u64,
17231    indices: svint64_t,
17232) -> svuint64_t {
17233    svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
17234}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32index_f32(
    pg: svbool_t,
    base: *const f32,
    indices: svuint32_t,
) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; `uxtw.index` is the
    // unsigned-32-bit, element-scaled index variant, `nxv4f32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32"
        )]
        fn _svldff1_gather_u32index_f32(
            pg: svbool4_t,
            base: *const f32,
            indices: svint32_t,
        ) -> svfloat32_t;
    }
    // The LLVM signature takes signed index vectors, so reinterpret the unsigned
    // indices bitwise; the predicate is converted to the expected `svbool4_t` form.
    _svldff1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed())
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32index_s32(
    pg: svbool_t,
    base: *const i32,
    indices: svuint32_t,
) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; `uxtw.index` is the
    // unsigned-32-bit, element-scaled index variant, `nxv4i32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32"
        )]
        fn _svldff1_gather_u32index_s32(
            pg: svbool4_t,
            base: *const i32,
            indices: svint32_t,
        ) -> svint32_t;
    }
    // The LLVM signature takes signed index vectors, so reinterpret the unsigned
    // indices bitwise; the predicate is converted to the expected `svbool4_t` form.
    _svldff1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed())
}
17291#[doc = "Unextended load, first-faulting"]
17292#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"]
17293#[doc = "## Safety"]
17294#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17295#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17296#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17297#[inline(always)]
17298#[target_feature(enable = "sve")]
17299#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17300#[cfg_attr(test, assert_instr(ldff1w))]
17301pub unsafe fn svldff1_gather_u32index_u32(
17302    pg: svbool_t,
17303    base: *const u32,
17304    indices: svuint32_t,
17305) -> svuint32_t {
17306    svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned()
17307}
17308#[doc = "Unextended load, first-faulting"]
17309#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"]
17310#[doc = "## Safety"]
17311#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17312#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17313#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17314#[inline(always)]
17315#[target_feature(enable = "sve")]
17316#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17317#[cfg_attr(test, assert_instr(ldff1d))]
17318pub unsafe fn svldff1_gather_u64index_f64(
17319    pg: svbool_t,
17320    base: *const f64,
17321    indices: svuint64_t,
17322) -> svfloat64_t {
17323    svldff1_gather_s64index_f64(pg, base, indices.as_signed())
17324}
17325#[doc = "Unextended load, first-faulting"]
17326#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"]
17327#[doc = "## Safety"]
17328#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17329#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17330#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17331#[inline(always)]
17332#[target_feature(enable = "sve")]
17333#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17334#[cfg_attr(test, assert_instr(ldff1d))]
17335pub unsafe fn svldff1_gather_u64index_s64(
17336    pg: svbool_t,
17337    base: *const i64,
17338    indices: svuint64_t,
17339) -> svint64_t {
17340    svldff1_gather_s64index_s64(pg, base, indices.as_signed())
17341}
17342#[doc = "Unextended load, first-faulting"]
17343#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"]
17344#[doc = "## Safety"]
17345#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17346#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17347#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17348#[inline(always)]
17349#[target_feature(enable = "sve")]
17350#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17351#[cfg_attr(test, assert_instr(ldff1d))]
17352pub unsafe fn svldff1_gather_u64index_u64(
17353    pg: svbool_t,
17354    base: *const u64,
17355    indices: svuint64_t,
17356) -> svuint64_t {
17357    svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
17358}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_s32offset_f32(
    pg: svbool_t,
    base: *const f32,
    offsets: svint32_t,
) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; `sxtw` (without `.index`) is the
    // signed-32-bit, byte-offset (unscaled) variant, `nxv4f32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32"
        )]
        fn _svldff1_gather_s32offset_f32(
            pg: svbool4_t,
            base: *const f32,
            offsets: svint32_t,
        ) -> svfloat32_t;
    }
    // Convert the generic predicate into the `svbool4_t` form the intrinsic expects.
    _svldff1_gather_s32offset_f32(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i32,
    offsets: svint32_t,
) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; `sxtw` (without `.index`) is the
    // signed-32-bit, byte-offset (unscaled) variant, `nxv4i32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32"
        )]
        fn _svldff1_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i32,
            offsets: svint32_t,
        ) -> svint32_t;
    }
    // Convert the generic predicate into the `svbool4_t` form the intrinsic expects.
    _svldff1_gather_s32offset_s32(pg.sve_into(), base, offsets)
}
17415#[doc = "Unextended load, first-faulting"]
17416#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"]
17417#[doc = "## Safety"]
17418#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17419#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17420#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17421#[inline(always)]
17422#[target_feature(enable = "sve")]
17423#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17424#[cfg_attr(test, assert_instr(ldff1w))]
17425pub unsafe fn svldff1_gather_s32offset_u32(
17426    pg: svbool_t,
17427    base: *const u32,
17428    offsets: svint32_t,
17429) -> svuint32_t {
17430    svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
17431}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_s64offset_f64(
    pg: svbool_t,
    base: *const f64,
    offsets: svint64_t,
) -> svfloat64_t {
    // Direct binding to the LLVM SVE intrinsic; the plain `gather` symbol (no
    // `sxtw`/`.index` suffix) is the 64-bit byte-offset variant, `nxv2f64` the
    // element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64"
        )]
        fn _svldff1_gather_s64offset_f64(
            pg: svbool2_t,
            base: *const f64,
            offsets: svint64_t,
        ) -> svfloat64_t;
    }
    // Convert the generic predicate into the `svbool2_t` form the intrinsic expects.
    _svldff1_gather_s64offset_f64(pg.sve_into(), base, offsets)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i64,
    offsets: svint64_t,
) -> svint64_t {
    // Direct binding to the LLVM SVE intrinsic; the plain `gather` symbol (no
    // `sxtw`/`.index` suffix) is the 64-bit byte-offset variant, `nxv2i64` the
    // element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64"
        )]
        fn _svldff1_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i64,
            offsets: svint64_t,
        ) -> svint64_t;
    }
    // Convert the generic predicate into the `svbool2_t` form the intrinsic expects.
    _svldff1_gather_s64offset_s64(pg.sve_into(), base, offsets)
}
17488#[doc = "Unextended load, first-faulting"]
17489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"]
17490#[doc = "## Safety"]
17491#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17492#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17493#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17494#[inline(always)]
17495#[target_feature(enable = "sve")]
17496#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17497#[cfg_attr(test, assert_instr(ldff1d))]
17498pub unsafe fn svldff1_gather_s64offset_u64(
17499    pg: svbool_t,
17500    base: *const u64,
17501    offsets: svint64_t,
17502) -> svuint64_t {
17503    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
17504}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32offset_f32(
    pg: svbool_t,
    base: *const f32,
    offsets: svuint32_t,
) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic; `uxtw` (without `.index`) is the
    // unsigned-32-bit, byte-offset (unscaled) variant, `nxv4f32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32"
        )]
        fn _svldff1_gather_u32offset_f32(
            pg: svbool4_t,
            base: *const f32,
            offsets: svint32_t,
        ) -> svfloat32_t;
    }
    // The LLVM signature takes signed offset vectors, so reinterpret the unsigned
    // offsets bitwise; the predicate is converted to the expected `svbool4_t` form.
    _svldff1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i32,
    offsets: svuint32_t,
) -> svint32_t {
    // Direct binding to the LLVM SVE intrinsic; `uxtw` (without `.index`) is the
    // unsigned-32-bit, byte-offset (unscaled) variant, `nxv4i32` the element type.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32"
        )]
        fn _svldff1_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i32,
            offsets: svint32_t,
        ) -> svint32_t;
    }
    // The LLVM signature takes signed offset vectors, so reinterpret the unsigned
    // offsets bitwise; the predicate is converted to the expected `svbool4_t` form.
    _svldff1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed())
}
17561#[doc = "Unextended load, first-faulting"]
17562#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"]
17563#[doc = "## Safety"]
17564#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17565#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17566#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17567#[inline(always)]
17568#[target_feature(enable = "sve")]
17569#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17570#[cfg_attr(test, assert_instr(ldff1w))]
17571pub unsafe fn svldff1_gather_u32offset_u32(
17572    pg: svbool_t,
17573    base: *const u32,
17574    offsets: svuint32_t,
17575) -> svuint32_t {
17576    svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
17577}
17578#[doc = "Unextended load, first-faulting"]
17579#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"]
17580#[doc = "## Safety"]
17581#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17582#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17583#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17584#[inline(always)]
17585#[target_feature(enable = "sve")]
17586#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17587#[cfg_attr(test, assert_instr(ldff1d))]
17588pub unsafe fn svldff1_gather_u64offset_f64(
17589    pg: svbool_t,
17590    base: *const f64,
17591    offsets: svuint64_t,
17592) -> svfloat64_t {
17593    svldff1_gather_s64offset_f64(pg, base, offsets.as_signed())
17594}
17595#[doc = "Unextended load, first-faulting"]
17596#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"]
17597#[doc = "## Safety"]
17598#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17599#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17600#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17601#[inline(always)]
17602#[target_feature(enable = "sve")]
17603#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17604#[cfg_attr(test, assert_instr(ldff1d))]
17605pub unsafe fn svldff1_gather_u64offset_s64(
17606    pg: svbool_t,
17607    base: *const i64,
17608    offsets: svuint64_t,
17609) -> svint64_t {
17610    svldff1_gather_s64offset_s64(pg, base, offsets.as_signed())
17611}
17612#[doc = "Unextended load, first-faulting"]
17613#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"]
17614#[doc = "## Safety"]
17615#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17616#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17617#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17618#[inline(always)]
17619#[target_feature(enable = "sve")]
17620#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17621#[cfg_attr(test, assert_instr(ldff1d))]
17622pub unsafe fn svldff1_gather_u64offset_u64(
17623    pg: svbool_t,
17624    base: *const u64,
17625    offsets: svuint64_t,
17626) -> svuint64_t {
17627    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
17628}
17629#[doc = "Unextended load, first-faulting"]
17630#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"]
17631#[doc = "## Safety"]
17632#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17633#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17634#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17635#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17636#[inline(always)]
17637#[target_feature(enable = "sve")]
17638#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17639#[cfg_attr(test, assert_instr(ldff1w))]
17640pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
17641    svldff1_gather_u32base_offset_f32(pg, bases, 0)
17642}
17643#[doc = "Unextended load, first-faulting"]
17644#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"]
17645#[doc = "## Safety"]
17646#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17647#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17648#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17649#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17650#[inline(always)]
17651#[target_feature(enable = "sve")]
17652#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17653#[cfg_attr(test, assert_instr(ldff1w))]
17654pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
17655    svldff1_gather_u32base_offset_s32(pg, bases, 0)
17656}
17657#[doc = "Unextended load, first-faulting"]
17658#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"]
17659#[doc = "## Safety"]
17660#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17661#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17662#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17663#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17664#[inline(always)]
17665#[target_feature(enable = "sve")]
17666#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17667#[cfg_attr(test, assert_instr(ldff1w))]
17668pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
17669    svldff1_gather_u32base_offset_u32(pg, bases, 0)
17670}
17671#[doc = "Unextended load, first-faulting"]
17672#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"]
17673#[doc = "## Safety"]
17674#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17675#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17676#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17677#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17678#[inline(always)]
17679#[target_feature(enable = "sve")]
17680#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17681#[cfg_attr(test, assert_instr(ldff1d))]
17682pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
17683    svldff1_gather_u64base_offset_f64(pg, bases, 0)
17684}
17685#[doc = "Unextended load, first-faulting"]
17686#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"]
17687#[doc = "## Safety"]
17688#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17689#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17690#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17691#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17692#[inline(always)]
17693#[target_feature(enable = "sve")]
17694#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17695#[cfg_attr(test, assert_instr(ldff1d))]
17696pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
17697    svldff1_gather_u64base_offset_s64(pg, bases, 0)
17698}
17699#[doc = "Unextended load, first-faulting"]
17700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"]
17701#[doc = "## Safety"]
17702#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17703#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17704#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17705#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17706#[inline(always)]
17707#[target_feature(enable = "sve")]
17708#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17709#[cfg_attr(test, assert_instr(ldff1d))]
17710pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
17711    svldff1_gather_u64base_offset_u64(pg, bases, 0)
17712}
17713#[doc = "Unextended load, first-faulting"]
17714#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"]
17715#[doc = "## Safety"]
17716#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17717#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17718#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17719#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17720#[inline(always)]
17721#[target_feature(enable = "sve")]
17722#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17723#[cfg_attr(test, assert_instr(ldff1w))]
17724pub unsafe fn svldff1_gather_u32base_index_f32(
17725    pg: svbool_t,
17726    bases: svuint32_t,
17727    index: i64,
17728) -> svfloat32_t {
17729    svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
17730}
17731#[doc = "Unextended load, first-faulting"]
17732#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"]
17733#[doc = "## Safety"]
17734#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17735#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17736#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17737#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17738#[inline(always)]
17739#[target_feature(enable = "sve")]
17740#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17741#[cfg_attr(test, assert_instr(ldff1w))]
17742pub unsafe fn svldff1_gather_u32base_index_s32(
17743    pg: svbool_t,
17744    bases: svuint32_t,
17745    index: i64,
17746) -> svint32_t {
17747    svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
17748}
17749#[doc = "Unextended load, first-faulting"]
17750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"]
17751#[doc = "## Safety"]
17752#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17753#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17754#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17755#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17756#[inline(always)]
17757#[target_feature(enable = "sve")]
17758#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17759#[cfg_attr(test, assert_instr(ldff1w))]
17760pub unsafe fn svldff1_gather_u32base_index_u32(
17761    pg: svbool_t,
17762    bases: svuint32_t,
17763    index: i64,
17764) -> svuint32_t {
17765    svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
17766}
17767#[doc = "Unextended load, first-faulting"]
17768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"]
17769#[doc = "## Safety"]
17770#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17771#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17772#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17773#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17774#[inline(always)]
17775#[target_feature(enable = "sve")]
17776#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17777#[cfg_attr(test, assert_instr(ldff1d))]
17778pub unsafe fn svldff1_gather_u64base_index_f64(
17779    pg: svbool_t,
17780    bases: svuint64_t,
17781    index: i64,
17782) -> svfloat64_t {
17783    svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
17784}
17785#[doc = "Unextended load, first-faulting"]
17786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"]
17787#[doc = "## Safety"]
17788#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17789#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17790#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17791#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17792#[inline(always)]
17793#[target_feature(enable = "sve")]
17794#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17795#[cfg_attr(test, assert_instr(ldff1d))]
17796pub unsafe fn svldff1_gather_u64base_index_s64(
17797    pg: svbool_t,
17798    bases: svuint64_t,
17799    index: i64,
17800) -> svint64_t {
17801    svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
17802}
17803#[doc = "Unextended load, first-faulting"]
17804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"]
17805#[doc = "## Safety"]
17806#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17807#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17808#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17809#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17810#[inline(always)]
17811#[target_feature(enable = "sve")]
17812#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17813#[cfg_attr(test, assert_instr(ldff1d))]
17814pub unsafe fn svldff1_gather_u64base_index_u64(
17815    pg: svbool_t,
17816    bases: svuint64_t,
17817    index: i64,
17818) -> svuint64_t {
17819    svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
17820}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32base_offset_f32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svfloat32_t {
    // Raw declaration of the LLVM "scalar offset" gather intrinsic (per the
    // link name): each lane of `bases` carries an address, and the single
    // scalar `offset` is added to every one of them.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32"
        )]
        fn _svldff1_gather_u32base_offset_f32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svfloat32_t;
    }
    // `sve_into` produces the 4-lane predicate for 32-bit elements;
    // `as_signed` reinterprets the base lanes bit-for-bit for the FFI type.
    _svldff1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Raw declaration of the LLVM "scalar offset" gather intrinsic (per the
    // link name): per-lane base addresses plus one shared scalar offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32"
        )]
        fn _svldff1_gather_u32base_offset_s32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> svint32_t;
    }
    // `sve_into` produces the 4-lane predicate for 32-bit elements;
    // `as_signed` reinterprets the base lanes bit-for-bit for the FFI type.
    _svldff1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset)
}
17879#[doc = "Unextended load, first-faulting"]
17880#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"]
17881#[doc = "## Safety"]
17882#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17883#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
17884#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
17885#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
17886#[inline(always)]
17887#[target_feature(enable = "sve")]
17888#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
17889#[cfg_attr(test, assert_instr(ldff1w))]
17890pub unsafe fn svldff1_gather_u32base_offset_u32(
17891    pg: svbool_t,
17892    bases: svuint32_t,
17893    offset: i64,
17894) -> svuint32_t {
17895    svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
17896}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_u64base_offset_f64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svfloat64_t {
    // Raw declaration of the LLVM "scalar offset" gather intrinsic (per the
    // link name): per-lane 64-bit base addresses plus one scalar offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64"
        )]
        fn _svldff1_gather_u64base_offset_f64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svfloat64_t;
    }
    // `sve_into` produces the 2-lane predicate for 64-bit elements;
    // `as_signed` reinterprets the base lanes bit-for-bit for the FFI type.
    _svldff1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Raw declaration of the LLVM "scalar offset" gather intrinsic (per the
    // link name): per-lane 64-bit base addresses plus one scalar offset.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64"
        )]
        fn _svldff1_gather_u64base_offset_s64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> svint64_t;
    }
    // `sve_into` produces the 2-lane predicate for 64-bit elements;
    // `as_signed` reinterprets the base lanes bit-for-bit for the FFI type.
    _svldff1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Same underlying LDFF1D gather as the signed variant; u64 lanes are a
    // bitwise reinterpretation of the loaded i64 lanes.
    svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
    // `vnum` counts whole vectors: scale it by the per-vector 32-bit element
    // count (`svcntw`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
    // `vnum` counts whole vectors: scale it by the per-vector 64-bit element
    // count (`svcntd`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
    // `vnum` counts whole vectors: scale it by the per-vector 8-bit element
    // count (`svcntb`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
    // `vnum` counts whole vectors: scale it by the per-vector 16-bit element
    // count (`svcnth`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
    // `vnum` counts whole vectors: scale it by the per-vector 32-bit element
    // count (`svcntw`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
    // `vnum` counts whole vectors: scale it by the per-vector 64-bit element
    // count (`svcntd`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
    // `vnum` counts whole vectors: scale it by the per-vector 8-bit element
    // count (`svcntb`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
    // `vnum` counts whole vectors: scale it by the per-vector 16-bit element
    // count (`svcnth`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
    // `vnum` counts whole vectors: scale it by the per-vector 32-bit element
    // count (`svcntw`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1d))]
pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
    // `vnum` counts whole vectors: scale it by the per-vector 64-bit element
    // count (`svcntd`) to obtain an element offset, then defer to the
    // contiguous first-faulting load.
    svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i8,
    offsets: svint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the sign-extended (`sxtw`) 32-bit lane of `offsets`; yields
        // narrow 8-bit lanes (`nxv4i8`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8"
        )]
        fn _svldff1sb_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // Widen the loaded i8 lanes to i32; `simd_cast` sign-extends signed
    // integer lanes, matching the "sb" (signed byte) semantics.
    crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s32offset_s32(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s32offset_s32(
    pg: svbool_t,
    base: *const i16,
    offsets: svint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the sign-extended (`sxtw`) 32-bit lane of `offsets`; yields
        // narrow 16-bit lanes (`nxv4i16`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16"
        )]
        fn _svldff1sh_gather_s32offset_s32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // Widen the loaded i16 lanes to i32; `simd_cast` sign-extends signed
    // integer lanes, matching the "sh" (signed halfword) semantics.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32offset_s32(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_s32offset_u32(
    pg: svbool_t,
    base: *const i8,
    offsets: svint32_t,
) -> svuint32_t {
    // Same LDFF1SB gather as the signed-result variant; the u32 result is a
    // bitwise reinterpretation of the sign-extended i32 lanes.
    svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s32offset_u32(
    pg: svbool_t,
    base: *const i16,
    offsets: svint32_t,
) -> svuint32_t {
    // Same LDFF1SH gather as the signed-result variant; the u32 result is a
    // bitwise reinterpretation of the sign-extended i32 lanes.
    svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i8,
    offsets: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the full 64-bit lane of `offsets` (no extension variant in the
        // link name); yields narrow 8-bit lanes (`nxv2i8`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8"
        )]
        fn _svldff1sb_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // Widen the loaded i8 lanes to i64; `simd_cast` sign-extends signed
    // integer lanes, matching the "sb" (signed byte) semantics.
    crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i16,
    offsets: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the full 64-bit lane of `offsets` (no extension variant in the
        // link name); yields narrow 16-bit lanes (`nxv2i16`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16"
        )]
        fn _svldff1sh_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // Widen the loaded i16 lanes to i64; `simd_cast` sign-extends signed
    // integer lanes, matching the "sh" (signed halfword) semantics.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_s64offset_s64(
    pg: svbool_t,
    base: *const i32,
    offsets: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the full 64-bit lane of `offsets` (no extension variant in the
        // link name); yields narrow 32-bit lanes (`nxv2i32`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32"
        )]
        fn _svldff1sw_gather_s64offset_s64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // Widen the loaded i32 lanes to i64; `simd_cast` sign-extends signed
    // integer lanes, matching the "sw" (signed word) semantics.
    crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64offset_s64(
        pg.sve_into(),
        base,
        offsets,
    ))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i8,
    offsets: svint64_t,
) -> svuint64_t {
    // Same LDFF1SB gather as the signed-result variant; the u64 result is a
    // bitwise reinterpretation of the sign-extended i64 lanes.
    svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i16,
    offsets: svint64_t,
) -> svuint64_t {
    // Same LDFF1SH gather as the signed-result variant; the u64 result is a
    // bitwise reinterpretation of the sign-extended i64 lanes.
    svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_s64offset_u64(
    pg: svbool_t,
    base: *const i32,
    offsets: svint64_t,
) -> svuint64_t {
    // Same LDFF1SW gather as the signed-result variant; the u64 result is a
    // bitwise reinterpretation of the sign-extended i64 lanes.
    svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i8,
    offsets: svuint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the zero-extended (`uxtw`) 32-bit lane of `offsets`; yields
        // narrow 8-bit lanes (`nxv4i8`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8"
        )]
        fn _svldff1sb_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // `offsets.as_signed()` only reinterprets the bits to match the FFI
    // signature; the `uxtw` addressing mode still treats them as unsigned.
    // `simd_cast` then sign-extends the loaded i8 lanes to i32.
    crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32offset_s32(
    pg: svbool_t,
    base: *const i16,
    offsets: svuint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM first-faulting gather: per-lane address is `base` plus
        // the zero-extended (`uxtw`) 32-bit lane of `offsets`; yields
        // narrow 16-bit lanes (`nxv4i16`).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16"
        )]
        fn _svldff1sh_gather_u32offset_s32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // `offsets.as_signed()` only reinterprets the bits to match the FFI
    // signature; the `uxtw` addressing mode still treats them as unsigned.
    // `simd_cast` then sign-extends the loaded i16 lanes to i32.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32offset_s32(
        pg.sve_into(),
        base,
        offsets.as_signed(),
    ))
}
18412#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"]
18414#[doc = "## Safety"]
18415#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18416#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18417#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18418#[inline(always)]
18419#[target_feature(enable = "sve")]
18420#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18421#[cfg_attr(test, assert_instr(ldff1sb))]
18422pub unsafe fn svldff1sb_gather_u32offset_u32(
18423    pg: svbool_t,
18424    base: *const i8,
18425    offsets: svuint32_t,
18426) -> svuint32_t {
18427    svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
18428}
18429#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18430#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"]
18431#[doc = "## Safety"]
18432#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18433#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18434#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18435#[inline(always)]
18436#[target_feature(enable = "sve")]
18437#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18438#[cfg_attr(test, assert_instr(ldff1sh))]
18439pub unsafe fn svldff1sh_gather_u32offset_u32(
18440    pg: svbool_t,
18441    base: *const i16,
18442    offsets: svuint32_t,
18443) -> svuint32_t {
18444    svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
18445}
18446#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"]
18448#[doc = "## Safety"]
18449#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18450#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18451#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18452#[inline(always)]
18453#[target_feature(enable = "sve")]
18454#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18455#[cfg_attr(test, assert_instr(ldff1sb))]
18456pub unsafe fn svldff1sb_gather_u64offset_s64(
18457    pg: svbool_t,
18458    base: *const i8,
18459    offsets: svuint64_t,
18460) -> svint64_t {
18461    svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
18462}
18463#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18464#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"]
18465#[doc = "## Safety"]
18466#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18467#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18468#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18469#[inline(always)]
18470#[target_feature(enable = "sve")]
18471#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18472#[cfg_attr(test, assert_instr(ldff1sh))]
18473pub unsafe fn svldff1sh_gather_u64offset_s64(
18474    pg: svbool_t,
18475    base: *const i16,
18476    offsets: svuint64_t,
18477) -> svint64_t {
18478    svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
18479}
18480#[doc = "Load 32-bit data and sign-extend, first-faulting"]
18481#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"]
18482#[doc = "## Safety"]
18483#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18484#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18485#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18486#[inline(always)]
18487#[target_feature(enable = "sve")]
18488#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18489#[cfg_attr(test, assert_instr(ldff1sw))]
18490pub unsafe fn svldff1sw_gather_u64offset_s64(
18491    pg: svbool_t,
18492    base: *const i32,
18493    offsets: svuint64_t,
18494) -> svint64_t {
18495    svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
18496}
18497#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18498#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"]
18499#[doc = "## Safety"]
18500#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18501#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18502#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18503#[inline(always)]
18504#[target_feature(enable = "sve")]
18505#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18506#[cfg_attr(test, assert_instr(ldff1sb))]
18507pub unsafe fn svldff1sb_gather_u64offset_u64(
18508    pg: svbool_t,
18509    base: *const i8,
18510    offsets: svuint64_t,
18511) -> svuint64_t {
18512    svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
18513}
18514#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18515#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"]
18516#[doc = "## Safety"]
18517#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18518#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18519#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18520#[inline(always)]
18521#[target_feature(enable = "sve")]
18522#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18523#[cfg_attr(test, assert_instr(ldff1sh))]
18524pub unsafe fn svldff1sh_gather_u64offset_u64(
18525    pg: svbool_t,
18526    base: *const i16,
18527    offsets: svuint64_t,
18528) -> svuint64_t {
18529    svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
18530}
18531#[doc = "Load 32-bit data and sign-extend, first-faulting"]
18532#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"]
18533#[doc = "## Safety"]
18534#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18535#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18536#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18537#[inline(always)]
18538#[target_feature(enable = "sve")]
18539#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18540#[cfg_attr(test, assert_instr(ldff1sw))]
18541pub unsafe fn svldff1sw_gather_u64offset_u64(
18542    pg: svbool_t,
18543    base: *const i32,
18544    offsets: svuint64_t,
18545) -> svuint64_t {
18546    svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
18547}
18548#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"]
18550#[doc = "## Safety"]
18551#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18552#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18553#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18554#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18555#[inline(always)]
18556#[target_feature(enable = "sve")]
18557#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18558#[cfg_attr(test, assert_instr(ldff1sb))]
18559pub unsafe fn svldff1sb_gather_u32base_offset_s32(
18560    pg: svbool_t,
18561    bases: svuint32_t,
18562    offset: i64,
18563) -> svint32_t {
18564    unsafe extern "unadjusted" {
18565        #[cfg_attr(
18566            target_arch = "aarch64",
18567            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32"
18568        )]
18569        fn _svldff1sb_gather_u32base_offset_s32(
18570            pg: svbool4_t,
18571            bases: svint32_t,
18572            offset: i64,
18573        ) -> nxv4i8;
18574    }
18575    crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32base_offset_s32(
18576        pg.sve_into(),
18577        bases.as_signed(),
18578        offset,
18579    ))
18580}
18581#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18582#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"]
18583#[doc = "## Safety"]
18584#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18585#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18586#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18587#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18588#[inline(always)]
18589#[target_feature(enable = "sve")]
18590#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18591#[cfg_attr(test, assert_instr(ldff1sh))]
18592pub unsafe fn svldff1sh_gather_u32base_offset_s32(
18593    pg: svbool_t,
18594    bases: svuint32_t,
18595    offset: i64,
18596) -> svint32_t {
18597    unsafe extern "unadjusted" {
18598        #[cfg_attr(
18599            target_arch = "aarch64",
18600            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32"
18601        )]
18602        fn _svldff1sh_gather_u32base_offset_s32(
18603            pg: svbool4_t,
18604            bases: svint32_t,
18605            offset: i64,
18606        ) -> nxv4i16;
18607    }
18608    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32base_offset_s32(
18609        pg.sve_into(),
18610        bases.as_signed(),
18611        offset,
18612    ))
18613}
18614#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18615#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"]
18616#[doc = "## Safety"]
18617#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18618#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18619#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18620#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18621#[inline(always)]
18622#[target_feature(enable = "sve")]
18623#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18624#[cfg_attr(test, assert_instr(ldff1sb))]
18625pub unsafe fn svldff1sb_gather_u32base_offset_u32(
18626    pg: svbool_t,
18627    bases: svuint32_t,
18628    offset: i64,
18629) -> svuint32_t {
18630    svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
18631}
18632#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18633#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"]
18634#[doc = "## Safety"]
18635#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18636#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18637#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18638#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18639#[inline(always)]
18640#[target_feature(enable = "sve")]
18641#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18642#[cfg_attr(test, assert_instr(ldff1sh))]
18643pub unsafe fn svldff1sh_gather_u32base_offset_u32(
18644    pg: svbool_t,
18645    bases: svuint32_t,
18646    offset: i64,
18647) -> svuint32_t {
18648    svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
18649}
18650#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18651#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"]
18652#[doc = "## Safety"]
18653#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18654#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18655#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18656#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18657#[inline(always)]
18658#[target_feature(enable = "sve")]
18659#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18660#[cfg_attr(test, assert_instr(ldff1sb))]
18661pub unsafe fn svldff1sb_gather_u64base_offset_s64(
18662    pg: svbool_t,
18663    bases: svuint64_t,
18664    offset: i64,
18665) -> svint64_t {
18666    unsafe extern "unadjusted" {
18667        #[cfg_attr(
18668            target_arch = "aarch64",
18669            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64"
18670        )]
18671        fn _svldff1sb_gather_u64base_offset_s64(
18672            pg: svbool2_t,
18673            bases: svint64_t,
18674            offset: i64,
18675        ) -> nxv2i8;
18676    }
18677    crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u64base_offset_s64(
18678        pg.sve_into(),
18679        bases.as_signed(),
18680        offset,
18681    ))
18682}
18683#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18684#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"]
18685#[doc = "## Safety"]
18686#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18687#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18688#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18689#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18690#[inline(always)]
18691#[target_feature(enable = "sve")]
18692#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18693#[cfg_attr(test, assert_instr(ldff1sh))]
18694pub unsafe fn svldff1sh_gather_u64base_offset_s64(
18695    pg: svbool_t,
18696    bases: svuint64_t,
18697    offset: i64,
18698) -> svint64_t {
18699    unsafe extern "unadjusted" {
18700        #[cfg_attr(
18701            target_arch = "aarch64",
18702            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64"
18703        )]
18704        fn _svldff1sh_gather_u64base_offset_s64(
18705            pg: svbool2_t,
18706            bases: svint64_t,
18707            offset: i64,
18708        ) -> nxv2i16;
18709    }
18710    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u64base_offset_s64(
18711        pg.sve_into(),
18712        bases.as_signed(),
18713        offset,
18714    ))
18715}
18716#[doc = "Load 32-bit data and sign-extend, first-faulting"]
18717#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"]
18718#[doc = "## Safety"]
18719#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18720#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18721#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18722#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18723#[inline(always)]
18724#[target_feature(enable = "sve")]
18725#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18726#[cfg_attr(test, assert_instr(ldff1sw))]
18727pub unsafe fn svldff1sw_gather_u64base_offset_s64(
18728    pg: svbool_t,
18729    bases: svuint64_t,
18730    offset: i64,
18731) -> svint64_t {
18732    unsafe extern "unadjusted" {
18733        #[cfg_attr(
18734            target_arch = "aarch64",
18735            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64"
18736        )]
18737        fn _svldff1sw_gather_u64base_offset_s64(
18738            pg: svbool2_t,
18739            bases: svint64_t,
18740            offset: i64,
18741        ) -> nxv2i32;
18742    }
18743    crate::intrinsics::simd::simd_cast(_svldff1sw_gather_u64base_offset_s64(
18744        pg.sve_into(),
18745        bases.as_signed(),
18746        offset,
18747    ))
18748}
18749#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18750#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"]
18751#[doc = "## Safety"]
18752#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18753#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18754#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18755#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18756#[inline(always)]
18757#[target_feature(enable = "sve")]
18758#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18759#[cfg_attr(test, assert_instr(ldff1sb))]
18760pub unsafe fn svldff1sb_gather_u64base_offset_u64(
18761    pg: svbool_t,
18762    bases: svuint64_t,
18763    offset: i64,
18764) -> svuint64_t {
18765    svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
18766}
18767#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18768#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"]
18769#[doc = "## Safety"]
18770#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18771#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18772#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18773#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18774#[inline(always)]
18775#[target_feature(enable = "sve")]
18776#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18777#[cfg_attr(test, assert_instr(ldff1sh))]
18778pub unsafe fn svldff1sh_gather_u64base_offset_u64(
18779    pg: svbool_t,
18780    bases: svuint64_t,
18781    offset: i64,
18782) -> svuint64_t {
18783    svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
18784}
18785#[doc = "Load 32-bit data and sign-extend, first-faulting"]
18786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"]
18787#[doc = "## Safety"]
18788#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18789#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18790#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18791#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18792#[inline(always)]
18793#[target_feature(enable = "sve")]
18794#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18795#[cfg_attr(test, assert_instr(ldff1sw))]
18796pub unsafe fn svldff1sw_gather_u64base_offset_u64(
18797    pg: svbool_t,
18798    bases: svuint64_t,
18799    offset: i64,
18800) -> svuint64_t {
18801    svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
18802}
18803#[doc = "Load 8-bit data and sign-extend, first-faulting"]
18804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"]
18805#[doc = "## Safety"]
18806#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18807#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18808#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18809#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18810#[inline(always)]
18811#[target_feature(enable = "sve")]
18812#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18813#[cfg_attr(test, assert_instr(ldff1sb))]
18814pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
18815    svldff1sb_gather_u32base_offset_s32(pg, bases, 0)
18816}
18817#[doc = "Load 16-bit data and sign-extend, first-faulting"]
18818#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"]
18819#[doc = "## Safety"]
18820#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18821#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
18822#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
18823#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
18824#[inline(always)]
18825#[target_feature(enable = "sve")]
18826#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
18827#[cfg_attr(test, assert_instr(ldff1sh))]
18828pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
18829    svldff1sh_gather_u32base_offset_s32(pg, bases, 0)
18830}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sb_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sh_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sb_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sh_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sw_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sb_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sh_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Base-only form: delegate to the offset variant with a zero byte offset.
    svldff1sw_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
        fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
    }
    // The LLVM intrinsic loads raw i8 elements; `simd_cast` then sign-extends
    // each lane from i8 to i16 to produce the svint16_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sb_s16(pg.sve_into(), base))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
        fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
    }
    // The LLVM intrinsic loads raw i8 elements; `simd_cast` then sign-extends
    // each lane from i8 to i32 to produce the svint32_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sb_s32(pg.sve_into(), base))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
        fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
    }
    // The LLVM intrinsic loads raw i16 elements; `simd_cast` then sign-extends
    // each lane from i16 to i32 to produce the svint32_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sh_s32(pg.sve_into(), base))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
        fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
    }
    // The LLVM intrinsic loads raw i8 elements; `simd_cast` then sign-extends
    // each lane from i8 to i64 to produce the svint64_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sb_s64(pg.sve_into(), base))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")]
        fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
    }
    // The LLVM intrinsic loads raw i16 elements; `simd_cast` then sign-extends
    // each lane from i16 to i64 to produce the svint64_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sh_s64(pg.sve_into(), base))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")]
        fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
    }
    // The LLVM intrinsic loads raw i32 elements; `simd_cast` then sign-extends
    // each lane from i32 to i64 to produce the svint64_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sw_s64(pg.sve_into(), base))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sb_s16(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sb_s32(pg, base).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sh_s32(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sb_s64(pg, base).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sh_s64(pg, base).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
    // Sign-extend exactly as the signed variant, then reinterpret the lanes
    // as unsigned (bitwise, no value change).
    svldff1sw_s64(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
    // `vnum` is scaled by svcnth() (the element count of a 16-bit vector,
    // i.e. one vector's worth of elements) and applied to `base` in units of i8.
    svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
    // `vnum` is scaled by svcntw() (the element count of a 32-bit vector,
    // i.e. one vector's worth of elements) and applied to `base` in units of i8.
    svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
    // `vnum` is scaled by svcntw() (the element count of a 32-bit vector,
    // i.e. one vector's worth of elements) and applied to `base` in units of i16.
    svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
    // `vnum` is scaled by svcntd() (the element count of a 64-bit vector,
    // i.e. one vector's worth of elements) and applied to `base` in units of i8.
    svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
    // `vnum` is scaled by svcntd() (the element count of a 64-bit vector,
    // i.e. one vector's worth of elements) and applied to `base` in units of i16.
    svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
    // Convert the whole-vector count `vnum` into an i32-element offset
    // (svcntd() elements are loaded per vector), then delegate.
    svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
    // `vnum` counts whole vectors; svcnth() converts it to an i8-element
    // offset for the 16-bit-lane destination, then delegate.
    svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
    // `vnum` counts whole vectors; svcntw() converts it to an i8-element
    // offset for the 32-bit-lane destination, then delegate.
    svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
    // `vnum` counts whole vectors; svcntw() converts it to an i16-element
    // offset for the 32-bit-lane destination, then delegate.
    svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sb))]
pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
    // `vnum` counts whole vectors; svcntd() converts it to an i8-element
    // offset for the 64-bit-lane destination, then delegate.
    svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
    // `vnum` counts whole vectors; svcntd() converts it to an i16-element
    // offset for the 64-bit-lane destination, then delegate.
    svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
    // `vnum` counts whole vectors; svcntd() converts it to an i32-element
    // offset for the 64-bit-lane destination, then delegate.
    svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s32index_s32(
    pg: svbool_t,
    base: *const i16,
    indices: svint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // `sxtw` in the link name: per-lane signed 32-bit indices are
        // sign-extended for the address computation.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16")]
        fn _svldff1sh_gather_s32index_s32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic yields 16-bit lanes (nxv4i16); simd_cast widens each
    // signed lane to 32 bits for the svint32_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32index_s32(pg.sve_into(), base, indices))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s32index_u32(
    pg: svbool_t,
    base: *const i16,
    indices: svint32_t,
) -> svuint32_t {
    // Same load/sign-extend as the signed-result variant; `as_unsigned()`
    // only retags the result lanes as u32.
    svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s64index_s64(
    pg: svbool_t,
    base: *const i16,
    indices: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
        )]
        fn _svldff1sh_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    // The intrinsic yields 16-bit lanes (nxv2i16); simd_cast widens each
    // signed lane to 64 bits for the svint64_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64index_s64(pg.sve_into(), base, indices))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_s64index_s64(
    pg: svbool_t,
    base: *const i32,
    indices: svint64_t,
) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
        )]
        fn _svldff1sw_gather_s64index_s64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    // The intrinsic yields 32-bit lanes (nxv2i32); simd_cast widens each
    // signed lane to 64 bits for the svint64_t result.
    crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64index_s64(pg.sve_into(), base, indices))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_s64index_u64(
    pg: svbool_t,
    base: *const i16,
    indices: svint64_t,
) -> svuint64_t {
    // Same load/sign-extend as the signed-result variant; `as_unsigned()`
    // only retags the result lanes as u64.
    svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_s64index_u64(
    pg: svbool_t,
    base: *const i32,
    indices: svint64_t,
) -> svuint64_t {
    // Same load/sign-extend as the signed-result variant; `as_unsigned()`
    // only retags the result lanes as u64.
    svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32index_s32(
    pg: svbool_t,
    base: *const i16,
    indices: svuint32_t,
) -> svint32_t {
    unsafe extern "unadjusted" {
        // `uxtw` in the link name: per-lane 32-bit indices are zero-extended
        // for the address computation (unsigned-index form).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16")]
        fn _svldff1sh_gather_u32index_s32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // Indices are retagged as signed to match the intrinsic's declared
    // parameter type; the intrinsic yields 16-bit lanes (nxv4i16), which
    // simd_cast widens (signed) to the 32-bit result lanes.
    crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32index_s32(
        pg.sve_into(),
        base,
        indices.as_signed(),
    ))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32index_u32(
    pg: svbool_t,
    base: *const i16,
    indices: svuint32_t,
) -> svuint32_t {
    // Same load/sign-extend as the signed-result variant; `as_unsigned()`
    // only retags the result lanes as u32.
    svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64index_s64(
    pg: svbool_t,
    base: *const i16,
    indices: svuint64_t,
) -> svint64_t {
    // At 64-bit index width there is no separate extension form: retag the
    // unsigned indices as signed and reuse the s64-index implementation.
    svldff1sh_gather_s64index_s64(pg, base, indices.as_signed())
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64index_s64(
    pg: svbool_t,
    base: *const i32,
    indices: svuint64_t,
) -> svint64_t {
    // At 64-bit index width there is no separate extension form: retag the
    // unsigned indices as signed and reuse the s64-index implementation.
    svldff1sw_gather_s64index_s64(pg, base, indices.as_signed())
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64index_u64(
    pg: svbool_t,
    base: *const i16,
    indices: svuint64_t,
) -> svuint64_t {
    // Retag indices as signed for the s64-index implementation, then retag
    // the sign-extended result lanes as u64; neither cast changes bits.
    svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64index_u64(
    pg: svbool_t,
    base: *const i32,
    indices: svuint64_t,
) -> svuint64_t {
    // Retag indices as signed for the s64-index implementation, then retag
    // the sign-extended result lanes as u64; neither cast changes bits.
    svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    // Convert the 16-bit-element index into a byte offset (<< 1 == * 2) and
    // delegate to the offset form. `unchecked_shl` places the no-overflow
    // requirement on the caller, consistent with the safety contract above.
    svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svuint32_t {
    // Convert the 16-bit-element index into a byte offset (<< 1 == * 2) and
    // delegate to the offset form; overflow is the caller's responsibility
    // (`unchecked_shl`), consistent with the safety contract above.
    svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Convert the 16-bit-element index into a byte offset (<< 1 == * 2) and
    // delegate to the offset form; overflow is the caller's responsibility
    // (`unchecked_shl`), consistent with the safety contract above.
    svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Convert the 32-bit-element index into a byte offset (<< 2 == * 4) and
    // delegate to the offset form; overflow is the caller's responsibility
    // (`unchecked_shl`), consistent with the safety contract above.
    svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 16-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sh))]
pub unsafe fn svldff1sh_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index into a byte offset (<< 1 for 16-bit elements)
    // and delegate to the byte-offset variant. `unchecked_shl` is sound:
    // the shift amount is the constant 1, well below `i64::BITS`.
    svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and sign-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1sw))]
pub unsafe fn svldff1sw_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index into a byte offset (<< 2 for 32-bit elements)
    // and delegate to the byte-offset variant. `unchecked_shl` is sound:
    // the shift amount is the constant 2, well below `i64::BITS`.
    svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_s32offset_s32(
    pg: svbool_t,
    base: *const u8,
    offsets: svint32_t,
) -> svint32_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s32offset_s32(
    pg: svbool_t,
    base: *const u16,
    offsets: svint32_t,
) -> svint32_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_s32offset_u32(
    pg: svbool_t,
    base: *const u8,
    offsets: svint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic; the ".sxtw" name indicates the 32-bit offsets are
    // sign-extended during address generation, and the LLVM signature uses
    // signed element/pointer types (hence the `as_signed` conversions below).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8"
        )]
        fn _svldff1ub_gather_s32offset_u32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // The intrinsic yields 8-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 32-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldff1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s32offset_u32(
    pg: svbool_t,
    base: *const u16,
    offsets: svint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic; the ".sxtw" name indicates the 32-bit offsets are
    // sign-extended during address generation, and the LLVM signature uses
    // signed element/pointer types (hence the `as_signed` conversions below).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16"
        )]
        fn _svldff1uh_gather_s32offset_u32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic yields 16-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 32-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldff1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u8,
    offsets: svint64_t,
) -> svint64_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u16,
    offsets: svint64_t,
) -> svint64_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64offset_s64(
    pg: svbool_t,
    base: *const u32,
    offsets: svint64_t,
) -> svint64_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u8,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic over 64-bit offsets (no extension suffix needed);
    // its signature uses signed element/pointer types, hence the
    // `as_signed`/`as_unsigned` conversions around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8"
        )]
        fn _svldff1ub_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i8,
            offsets: svint64_t,
        ) -> nxv2i8;
    }
    // The intrinsic yields 8-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 64-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svldff1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u16,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic over 64-bit offsets (no extension suffix needed);
    // its signature uses signed element/pointer types, hence the
    // `as_signed`/`as_unsigned` conversions around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16"
        )]
        fn _svldff1uh_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i16,
            offsets: svint64_t,
        ) -> nxv2i16;
    }
    // The intrinsic yields 16-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 64-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldff1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64offset_u64(
    pg: svbool_t,
    base: *const u32,
    offsets: svint64_t,
) -> svuint64_t {
    // Raw LLVM intrinsic over 64-bit offsets (no extension suffix needed);
    // its signature uses signed element/pointer types, hence the
    // `as_signed`/`as_unsigned` conversions around the call.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32"
        )]
        fn _svldff1uw_gather_s64offset_u64(
            pg: svbool2_t,
            base: *const i32,
            offsets: svint64_t,
        ) -> nxv2i32;
    }
    // The intrinsic yields 32-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 64-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldff1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32offset_s32(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint32_t,
) -> svint32_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32offset_s32(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint32_t,
) -> svint32_t {
    // Delegate to the unsigned-result variant and reinterpret the lanes as
    // signed; the loaded bits are identical (zero-extension either way).
    svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic; the ".uxtw" name indicates the 32-bit offsets are
    // zero-extended during address generation. The LLVM signature uses signed
    // types throughout, hence the `as_signed` conversions below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8"
        )]
        fn _svldff1ub_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i8,
            offsets: svint32_t,
        ) -> nxv4i8;
    }
    // The intrinsic yields 8-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 32-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldff1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32offset_u32(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint32_t,
) -> svuint32_t {
    // Raw LLVM intrinsic; the ".uxtw" name indicates the 32-bit offsets are
    // zero-extended during address generation. The LLVM signature uses signed
    // types throughout, hence the `as_signed` conversions below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16"
        )]
        fn _svldff1uh_gather_u32offset_u32(
            pg: svbool4_t,
            base: *const i16,
            offsets: svint32_t,
        ) -> nxv4i16;
    }
    // The intrinsic yields 16-bit lanes; `simd_cast` on the unsigned view
    // zero-extends each lane to the 32-bit element width of the result.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldff1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint64_t,
) -> svint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical) and
    // delegate; the result lanes are likewise reinterpreted back to signed.
    svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint64_t,
) -> svint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical) and
    // delegate; the result lanes are likewise reinterpreted back to signed.
    svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64offset_s64(
    pg: svbool_t,
    base: *const u32,
    offsets: svuint64_t,
) -> svint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical) and
    // delegate; the result lanes are likewise reinterpreted back to signed.
    svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u8,
    offsets: svuint64_t,
) -> svuint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical)
    // and delegate to the signed-offset variant.
    svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u16,
    offsets: svuint64_t,
) -> svuint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical)
    // and delegate to the signed-offset variant.
    svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64offset_u64(
    pg: svbool_t,
    base: *const u32,
    offsets: svuint64_t,
) -> svuint64_t {
    // Reinterpret the unsigned 64-bit offsets as signed (bit-identical)
    // and delegate to the signed-offset variant.
    svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Same load as the _u32 variant; the zero-extended lanes are merely
    // reinterpreted as signed 32-bit values.
    svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svint32_t {
    // Same load as the _u32 variant; the zero-extended lanes are merely
    // reinterpreted as signed 32-bit values.
    svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Raw binding to the LLVM first-faulting gather intrinsic. The LLVM
    // signature takes signed lane types and returns narrow (8-bit) lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32"
        )]
        fn _svldff1ub_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i8;
    }
    // `bases.as_signed()` only reinterprets the bit pattern to match the LLVM
    // signature. The narrow result is viewed as unsigned 8-bit lanes and then
    // widened with `simd_cast`, which zero-extends unsigned lanes — the
    // "zero-extend" part of this intrinsic's contract.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldff1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) -> svuint32_t {
    // Raw binding to the LLVM first-faulting gather intrinsic. The LLVM
    // signature takes signed lane types and returns narrow (16-bit) lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32"
        )]
        fn _svldff1uh_gather_u32base_offset_u32(
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        ) -> nxv4i16;
    }
    // `bases.as_signed()` only reinterprets the bit pattern to match the LLVM
    // signature. The narrow result is viewed as unsigned 16-bit lanes and then
    // widened with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldff1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Same load as the _u64 variant; the zero-extended lanes are merely
    // reinterpreted as signed 64-bit values.
    svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Same load as the _u64 variant; the zero-extended lanes are merely
    // reinterpreted as signed 64-bit values.
    svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svint64_t {
    // Same load as the _u64 variant; the zero-extended lanes are merely
    // reinterpreted as signed 64-bit values.
    svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Raw binding to the LLVM first-faulting gather intrinsic. The LLVM
    // signature takes signed lane types and returns narrow (8-bit) lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svldff1ub_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i8;
    }
    // `bases.as_signed()` only reinterprets the bit pattern to match the LLVM
    // signature. The narrow result is viewed as unsigned 8-bit lanes and then
    // widened with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svldff1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Raw binding to the LLVM first-faulting gather intrinsic. The LLVM
    // signature takes signed lane types and returns narrow (16-bit) lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svldff1uh_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i16;
    }
    // `bases.as_signed()` only reinterprets the bit pattern to match the LLVM
    // signature. The narrow result is viewed as unsigned 16-bit lanes and then
    // widened with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldff1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) -> svuint64_t {
    // Raw binding to the LLVM first-faulting gather intrinsic. The LLVM
    // signature takes signed lane types and returns narrow (32-bit) lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svldff1uw_gather_u64base_offset_u64(
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        ) -> nxv2i32;
    }
    // `bases.as_signed()` only reinterprets the bit pattern to match the LLVM
    // signature. The narrow result is viewed as unsigned 32-bit lanes and then
    // widened with `simd_cast`, which zero-extends unsigned lanes.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldff1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset)
            .as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1ub_gather_u32base_offset_s32(pg, bases, 0)
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uh_gather_u32base_offset_s32(pg, bases, 0)
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1ub_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uh_gather_u32base_offset_u32(pg, bases, 0)
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1ub_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uh_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uw_gather_u64base_offset_s64(pg, bases, 0)
}
#[doc = "Load 8-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1b))]
pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1ub_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uh_gather_u64base_offset_u64(pg, bases, 0)
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
    // Gather directly from the per-lane base addresses (scalar offset of 0).
    svldff1uw_gather_u64base_offset_u64(pg, bases, 0)
}
20462#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20463#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"]
20464#[doc = "## Safety"]
20465#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20466#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20467#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20468#[inline(always)]
20469#[target_feature(enable = "sve")]
20470#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20471#[cfg_attr(test, assert_instr(ldff1b))]
20472pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
    // First-faulting load of 8 x 8-bit lanes via the LLVM intrinsic named below.
20473    unsafe extern "unadjusted" {
20474        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
20475        fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
20476    }
    // Reinterpret the loaded lanes as unsigned (`nxv8u8`) before widening so
    // the `simd_cast` to 16-bit lanes zero-extends rather than sign-extends.
20477    crate::intrinsics::simd::simd_cast::<nxv8u8, _>(
20478        _svldff1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(),
20479    )
20480}
20481#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20482#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"]
20483#[doc = "## Safety"]
20484#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20485#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20486#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20487#[inline(always)]
20488#[target_feature(enable = "sve")]
20489#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20490#[cfg_attr(test, assert_instr(ldff1b))]
20491pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
    // First-faulting load of 4 x 8-bit lanes via the LLVM intrinsic named below.
20492    unsafe extern "unadjusted" {
20493        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
20494        fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
20495    }
    // Reinterpret the loaded lanes as unsigned (`nxv4u8`) before widening so
    // the `simd_cast` to 32-bit lanes zero-extends rather than sign-extends.
20496    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
20497        _svldff1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
20498    )
20499}
20500#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20501#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"]
20502#[doc = "## Safety"]
20503#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20504#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20505#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20506#[inline(always)]
20507#[target_feature(enable = "sve")]
20508#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20509#[cfg_attr(test, assert_instr(ldff1h))]
20510pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
    // First-faulting load of 4 x 16-bit lanes via the LLVM intrinsic named below.
20511    unsafe extern "unadjusted" {
20512        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
20513        fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
20514    }
    // Reinterpret the loaded lanes as unsigned (`nxv4u16`) before widening so
    // the `simd_cast` to 32-bit lanes zero-extends rather than sign-extends.
20515    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
20516        _svldff1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
20517    )
20518}
20519#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20520#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"]
20521#[doc = "## Safety"]
20522#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20523#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20524#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20525#[inline(always)]
20526#[target_feature(enable = "sve")]
20527#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20528#[cfg_attr(test, assert_instr(ldff1b))]
20529pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
    // First-faulting load of 2 x 8-bit lanes via the LLVM intrinsic named below.
20530    unsafe extern "unadjusted" {
20531        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
20532        fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
20533    }
    // Reinterpret the loaded lanes as unsigned (`nxv2u8`) before widening so
    // the `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
20534    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
20535        _svldff1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
20536    )
20537}
20538#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"]
20540#[doc = "## Safety"]
20541#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20542#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20543#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20544#[inline(always)]
20545#[target_feature(enable = "sve")]
20546#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20547#[cfg_attr(test, assert_instr(ldff1h))]
20548pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
    // First-faulting load of 2 x 16-bit lanes via the LLVM intrinsic named below.
20549    unsafe extern "unadjusted" {
20550        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")]
20551        fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
20552    }
    // Reinterpret the loaded lanes as unsigned (`nxv2u16`) before widening so
    // the `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
20553    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
20554        _svldff1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
20555    )
20556}
20557#[doc = "Load 32-bit data and zero-extend, first-faulting"]
20558#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"]
20559#[doc = "## Safety"]
20560#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20561#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20562#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20563#[inline(always)]
20564#[target_feature(enable = "sve")]
20565#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20566#[cfg_attr(test, assert_instr(ldff1w))]
20567pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
    // First-faulting load of 2 x 32-bit lanes via the LLVM intrinsic named below.
20568    unsafe extern "unadjusted" {
20569        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")]
20570        fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
20571    }
    // Reinterpret the loaded lanes as unsigned (`nxv2u32`) before widening so
    // the `simd_cast` to 64-bit lanes zero-extends rather than sign-extends.
20572    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
20573        _svldff1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
20574    )
20575}
20576#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20577#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"]
20578#[doc = "## Safety"]
20579#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20580#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20581#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20582#[inline(always)]
20583#[target_feature(enable = "sve")]
20584#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20585#[cfg_attr(test, assert_instr(ldff1b))]
20586pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20587    svldff1ub_s16(pg, base).as_unsigned()
20588}
20589#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20590#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"]
20591#[doc = "## Safety"]
20592#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20593#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20594#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20595#[inline(always)]
20596#[target_feature(enable = "sve")]
20597#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20598#[cfg_attr(test, assert_instr(ldff1b))]
20599pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20600    svldff1ub_s32(pg, base).as_unsigned()
20601}
20602#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20603#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"]
20604#[doc = "## Safety"]
20605#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20606#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20607#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20608#[inline(always)]
20609#[target_feature(enable = "sve")]
20610#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20611#[cfg_attr(test, assert_instr(ldff1h))]
20612pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20613    svldff1uh_s32(pg, base).as_unsigned()
20614}
20615#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20616#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"]
20617#[doc = "## Safety"]
20618#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20619#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20620#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20621#[inline(always)]
20622#[target_feature(enable = "sve")]
20623#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20624#[cfg_attr(test, assert_instr(ldff1b))]
20625pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20626    svldff1ub_s64(pg, base).as_unsigned()
20627}
20628#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20629#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"]
20630#[doc = "## Safety"]
20631#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20632#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20633#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20634#[inline(always)]
20635#[target_feature(enable = "sve")]
20636#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20637#[cfg_attr(test, assert_instr(ldff1h))]
20638pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20639    svldff1uh_s64(pg, base).as_unsigned()
20640}
20641#[doc = "Load 32-bit data and zero-extend, first-faulting"]
20642#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"]
20643#[doc = "## Safety"]
20644#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20645#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20646#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20647#[inline(always)]
20648#[target_feature(enable = "sve")]
20649#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20650#[cfg_attr(test, assert_instr(ldff1w))]
20651pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
    // Same load as the signed variant; result lanes are merely reinterpreted
    // as unsigned (the bits are identical — zero-extension already happened).
20652    svldff1uw_s64(pg, base).as_unsigned()
20653}
20654#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20655#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"]
20656#[doc = "## Safety"]
20657#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20658#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20659#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20660#[inline(always)]
20661#[target_feature(enable = "sve")]
20662#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20663#[cfg_attr(test, assert_instr(ldff1b))]
20664pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
    // Advance `base` by `svcnth() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20665    svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
20666}
20667#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20668#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"]
20669#[doc = "## Safety"]
20670#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20671#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20672#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20673#[inline(always)]
20674#[target_feature(enable = "sve")]
20675#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20676#[cfg_attr(test, assert_instr(ldff1b))]
20677pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
    // Advance `base` by `svcntw() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20678    svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
20679}
20680#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20681#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"]
20682#[doc = "## Safety"]
20683#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20684#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20685#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20686#[inline(always)]
20687#[target_feature(enable = "sve")]
20688#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20689#[cfg_attr(test, assert_instr(ldff1h))]
20690pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
    // Advance `base` by `svcntw() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20691    svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
20692}
20693#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20694#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"]
20695#[doc = "## Safety"]
20696#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20697#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20698#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20699#[inline(always)]
20700#[target_feature(enable = "sve")]
20701#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20702#[cfg_attr(test, assert_instr(ldff1b))]
20703pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
    // Advance `base` by `svcntd() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20704    svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
20705}
20706#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20707#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"]
20708#[doc = "## Safety"]
20709#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20710#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20711#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20712#[inline(always)]
20713#[target_feature(enable = "sve")]
20714#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20715#[cfg_attr(test, assert_instr(ldff1h))]
20716pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
    // Advance `base` by `svcntd() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20717    svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
20718}
20719#[doc = "Load 32-bit data and zero-extend, first-faulting"]
20720#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"]
20721#[doc = "## Safety"]
20722#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20723#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20724#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20725#[inline(always)]
20726#[target_feature(enable = "sve")]
20727#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20728#[cfg_attr(test, assert_instr(ldff1w))]
20729pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
    // Advance `base` by `svcntd() * vnum` elements — one vector's worth of
    // lanes per unit of `vnum` (VL-dependent) — then delegate to the plain load.
20730    svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
20731}
20732#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20733#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"]
20734#[doc = "## Safety"]
20735#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20736#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20737#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20738#[inline(always)]
20739#[target_feature(enable = "sve")]
20740#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20741#[cfg_attr(test, assert_instr(ldff1b))]
20742pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
    // Advance `base` by `svcnth() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20743    svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
20744}
20745#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20746#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"]
20747#[doc = "## Safety"]
20748#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20749#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20750#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20751#[inline(always)]
20752#[target_feature(enable = "sve")]
20753#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20754#[cfg_attr(test, assert_instr(ldff1b))]
20755pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
    // Advance `base` by `svcntw() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20756    svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
20757}
20758#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20759#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"]
20760#[doc = "## Safety"]
20761#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20762#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20763#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20764#[inline(always)]
20765#[target_feature(enable = "sve")]
20766#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20767#[cfg_attr(test, assert_instr(ldff1h))]
20768pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
    // Advance `base` by `svcntw() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20769    svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
20770}
20771#[doc = "Load 8-bit data and zero-extend, first-faulting"]
20772#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"]
20773#[doc = "## Safety"]
20774#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20775#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20776#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20777#[inline(always)]
20778#[target_feature(enable = "sve")]
20779#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20780#[cfg_attr(test, assert_instr(ldff1b))]
20781pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
    // Advance `base` by `svcntd() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20782    svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
20783}
20784#[doc = "Load 16-bit data and zero-extend, first-faulting"]
20785#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"]
20786#[doc = "## Safety"]
20787#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20788#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20789#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20790#[inline(always)]
20791#[target_feature(enable = "sve")]
20792#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20793#[cfg_attr(test, assert_instr(ldff1h))]
20794pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
    // Advance `base` by `svcntd() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20795    svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
20796}
20797#[doc = "Load 32-bit data and zero-extend, first-faulting"]
20798#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"]
20799#[doc = "## Safety"]
20800#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
20801#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
20802#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
20803#[inline(always)]
20804#[target_feature(enable = "sve")]
20805#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
20806#[cfg_attr(test, assert_instr(ldff1w))]
20807pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
    // Advance `base` by `svcntd() * vnum` elements (VL-dependent), then
    // delegate to the unsigned plain load.
20808    svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
20809}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s32index_s32(
    pg: svbool_t,
    base: *const u16,
    indices: svint32_t,
) -> svint32_t {
    // Same operation as the `_u32` variant; only the result lane type is
    // reinterpreted as signed. The loaded data is still zero-extended.
    svldff1uh_gather_s32index_u32(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svint32_t,
) -> svuint32_t {
    unsafe extern "unadjusted" {
        // `sxtw.index`: 32-bit indices are sign-extended and scaled by the
        // element size by the instruction itself.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16"
        )]
        fn _svldff1uh_gather_s32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // The LLVM intrinsic yields 16-bit lanes; routing the cast through the
    // unsigned lane type (`nxv4u16`) makes `simd_cast` zero-extend (not
    // sign-extend) each lane to 32 bits.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldff1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64index_s64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svint64_t {
    // Same operation as the `_u64` variant; only the result lane type is
    // reinterpreted as signed. The loaded data is still zero-extended.
    svldff1uh_gather_s64index_u64(pg, base, indices).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64index_s64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svint64_t {
    // Same operation as the `_u64` variant; only the result lane type is
    // reinterpreted as signed. The loaded data is still zero-extended.
    svldff1uw_gather_s64index_u64(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_s64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svint64_t,
) -> svuint64_t {
    unsafe extern "unadjusted" {
        // 64-bit indices are scaled by the element size by the instruction.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
        )]
        fn _svldff1uh_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i16,
            indices: svint64_t,
        ) -> nxv2i16;
    }
    // The LLVM intrinsic yields 16-bit lanes; routing the cast through the
    // unsigned lane type (`nxv2u16`) makes `simd_cast` zero-extend (not
    // sign-extend) each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldff1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_s64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svint64_t,
) -> svuint64_t {
    unsafe extern "unadjusted" {
        // 64-bit indices are scaled by the element size by the instruction.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
        )]
        fn _svldff1uw_gather_s64index_u64(
            pg: svbool2_t,
            base: *const i32,
            indices: svint64_t,
        ) -> nxv2i32;
    }
    // The LLVM intrinsic yields 32-bit lanes; routing the cast through the
    // unsigned lane type (`nxv2u32`) makes `simd_cast` zero-extend (not
    // sign-extend) each lane to 64 bits.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldff1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32index_s32(
    pg: svbool_t,
    base: *const u16,
    indices: svuint32_t,
) -> svint32_t {
    // Same operation as the `_u32` variant; only the result lane type is
    // reinterpreted as signed. The loaded data is still zero-extended.
    svldff1uh_gather_u32index_u32(pg, base, indices).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32index_u32(
    pg: svbool_t,
    base: *const u16,
    indices: svuint32_t,
) -> svuint32_t {
    unsafe extern "unadjusted" {
        // `uxtw.index`: 32-bit indices are zero-extended and scaled by the
        // element size by the instruction itself.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16"
        )]
        fn _svldff1uh_gather_u32index_u32(
            pg: svbool4_t,
            base: *const i16,
            indices: svint32_t,
        ) -> nxv4i16;
    }
    // The LLVM intrinsic yields 16-bit lanes; routing the cast through the
    // unsigned lane type (`nxv4u16`) makes `simd_cast` zero-extend (not
    // sign-extend) each lane to 32 bits.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldff1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed())
            .as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64index_s64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svint64_t {
    // Reuses the signed-index `_u64` variant: the index and result casts only
    // reinterpret lane signedness, they do not change any bits.
    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64index_s64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svint64_t {
    // Reuses the signed-index `_u64` variant: the index and result casts only
    // reinterpret lane signedness, they do not change any bits.
    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64index_u64(
    pg: svbool_t,
    base: *const u16,
    indices: svuint64_t,
) -> svuint64_t {
    // Reuses the signed-index variant: `as_signed()` only reinterprets the
    // index lanes, it does not change any bits.
    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64index_u64(
    pg: svbool_t,
    base: *const u32,
    indices: svuint64_t,
) -> svuint64_t {
    // Reuses the signed-index variant: `as_signed()` only reinterprets the
    // index lanes, it does not change any bits.
    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed())
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svint32_t {
    // Scale the element index to a byte offset (x2 for 16-bit data). The
    // shift amount (1) is always < 64, so `unchecked_shl` is sound here.
    svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) -> svuint32_t {
    // Scale the element index to a byte offset (x2 for 16-bit data). The
    // shift amount (1) is always < 64, so `unchecked_shl` is sound here.
    svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Scale the element index to a byte offset (x2 for 16-bit data). The
    // shift amount (1) is always < 64, so `unchecked_shl` is sound here.
    svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svint64_t {
    // Scale the element index to a byte offset (x4 for 32-bit data). The
    // shift amount (2) is always < 64, so `unchecked_shl` is sound here.
    svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Load 16-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1h))]
pub unsafe fn svldff1uh_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index to a byte offset (x2 for 16-bit data). The
    // shift amount (1) is always < 64, so `unchecked_shl` is sound here.
    svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
}
#[doc = "Load 32-bit data and zero-extend, first-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldff1w))]
pub unsafe fn svldff1uw_gather_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) -> svuint64_t {
    // Scale the element index to a byte offset (x4 for 32-bit data). The
    // shift amount (2) is always < 64, so `unchecked_shl` is sound here.
    svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1w))]
pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")]
        fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
    }
    // `sve_into` converts the generic predicate to the 4-lane form matching
    // the 32-bit element count of the LLVM intrinsic.
    _svldnf1_f32(pg.sve_into(), base)
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1d))]
pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")]
        fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
    }
    // `sve_into` converts the generic predicate to the 2-lane form matching
    // the 64-bit element count of the LLVM intrinsic.
    _svldnf1_f64(pg.sve_into(), base)
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")]
        fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
    }
    // 8-bit elements use the full-width predicate, so no conversion is needed.
    _svldnf1_s8(pg, base)
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1h))]
pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")]
        fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
    }
    // `sve_into` converts the generic predicate to the 8-lane form matching
    // the 16-bit element count of the LLVM intrinsic.
    _svldnf1_s16(pg.sve_into(), base)
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1w))]
pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
    unsafe extern "unadjusted" {
        // LDNF1W over 32-bit elements: the LLVM intrinsic takes a 4-lane
        // predicate (`svbool4_t`), hence the `sve_into` conversion below.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")]
        fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
    }
    _svldnf1_s32(pg.sve_into(), base)
}
#[doc = "Unextended load, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1d))]
pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
    unsafe extern "unadjusted" {
        // LDNF1D over 64-bit elements: the LLVM intrinsic takes a 2-lane
        // predicate (`svbool2_t`), hence the `sve_into` conversion below.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")]
        fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
    }
    _svldnf1_s64(pg.sve_into(), base)
}
21277#[doc = "Unextended load, non-faulting"]
21278#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"]
21279#[doc = "## Safety"]
21280#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21281#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21282#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21283#[inline(always)]
21284#[target_feature(enable = "sve")]
21285#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21286#[cfg_attr(test, assert_instr(ldnf1b))]
21287pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
21288    svldnf1_s8(pg, base.as_signed()).as_unsigned()
21289}
21290#[doc = "Unextended load, non-faulting"]
21291#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"]
21292#[doc = "## Safety"]
21293#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21294#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21295#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21296#[inline(always)]
21297#[target_feature(enable = "sve")]
21298#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21299#[cfg_attr(test, assert_instr(ldnf1h))]
21300pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
21301    svldnf1_s16(pg, base.as_signed()).as_unsigned()
21302}
21303#[doc = "Unextended load, non-faulting"]
21304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"]
21305#[doc = "## Safety"]
21306#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21307#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21308#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21309#[inline(always)]
21310#[target_feature(enable = "sve")]
21311#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21312#[cfg_attr(test, assert_instr(ldnf1w))]
21313pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
21314    svldnf1_s32(pg, base.as_signed()).as_unsigned()
21315}
21316#[doc = "Unextended load, non-faulting"]
21317#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"]
21318#[doc = "## Safety"]
21319#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21320#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21321#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21322#[inline(always)]
21323#[target_feature(enable = "sve")]
21324#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21325#[cfg_attr(test, assert_instr(ldnf1d))]
21326pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
21327    svldnf1_s64(pg, base.as_signed()).as_unsigned()
21328}
21329#[doc = "Unextended load, non-faulting"]
21330#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"]
21331#[doc = "## Safety"]
21332#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21333#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21334#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21335#[inline(always)]
21336#[target_feature(enable = "sve")]
21337#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21338#[cfg_attr(test, assert_instr(ldnf1w))]
21339pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
21340    svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
21341}
21342#[doc = "Unextended load, non-faulting"]
21343#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"]
21344#[doc = "## Safety"]
21345#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21346#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21347#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21348#[inline(always)]
21349#[target_feature(enable = "sve")]
21350#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21351#[cfg_attr(test, assert_instr(ldnf1d))]
21352pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
21353    svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
21354}
21355#[doc = "Unextended load, non-faulting"]
21356#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"]
21357#[doc = "## Safety"]
21358#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21359#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21360#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21361#[inline(always)]
21362#[target_feature(enable = "sve")]
21363#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21364#[cfg_attr(test, assert_instr(ldnf1b))]
21365pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
21366    svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
21367}
21368#[doc = "Unextended load, non-faulting"]
21369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"]
21370#[doc = "## Safety"]
21371#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21372#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21373#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21374#[inline(always)]
21375#[target_feature(enable = "sve")]
21376#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21377#[cfg_attr(test, assert_instr(ldnf1h))]
21378pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
21379    svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
21380}
21381#[doc = "Unextended load, non-faulting"]
21382#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"]
21383#[doc = "## Safety"]
21384#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21385#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21386#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21387#[inline(always)]
21388#[target_feature(enable = "sve")]
21389#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21390#[cfg_attr(test, assert_instr(ldnf1w))]
21391pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
21392    svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
21393}
21394#[doc = "Unextended load, non-faulting"]
21395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"]
21396#[doc = "## Safety"]
21397#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21398#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21399#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21400#[inline(always)]
21401#[target_feature(enable = "sve")]
21402#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21403#[cfg_attr(test, assert_instr(ldnf1d))]
21404pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
21405    svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
21406}
21407#[doc = "Unextended load, non-faulting"]
21408#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"]
21409#[doc = "## Safety"]
21410#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21411#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21412#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21413#[inline(always)]
21414#[target_feature(enable = "sve")]
21415#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21416#[cfg_attr(test, assert_instr(ldnf1b))]
21417pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
21418    svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
21419}
21420#[doc = "Unextended load, non-faulting"]
21421#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"]
21422#[doc = "## Safety"]
21423#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21424#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21425#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21426#[inline(always)]
21427#[target_feature(enable = "sve")]
21428#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21429#[cfg_attr(test, assert_instr(ldnf1h))]
21430pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
21431    svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
21432}
21433#[doc = "Unextended load, non-faulting"]
21434#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"]
21435#[doc = "## Safety"]
21436#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21437#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21438#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21439#[inline(always)]
21440#[target_feature(enable = "sve")]
21441#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21442#[cfg_attr(test, assert_instr(ldnf1w))]
21443pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
21444    svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
21445}
21446#[doc = "Unextended load, non-faulting"]
21447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"]
21448#[doc = "## Safety"]
21449#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21450#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21451#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21452#[inline(always)]
21453#[target_feature(enable = "sve")]
21454#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21455#[cfg_attr(test, assert_instr(ldnf1d))]
21456pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
21457    svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
21458}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
    unsafe extern "unadjusted" {
        // Loads 8 bytes per vector (nxv8i8) under an 8-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")]
        fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
    }
    // `simd_cast` widens each signed 8-bit lane to 16 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sb_s16(pg.sve_into(), base))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
    unsafe extern "unadjusted" {
        // Loads 4 bytes per vector (nxv4i8) under a 4-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")]
        fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
    }
    // `simd_cast` widens each signed 8-bit lane to 32 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sb_s32(pg.sve_into(), base))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
    unsafe extern "unadjusted" {
        // Loads 4 halfwords per vector (nxv4i16) under a 4-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")]
        fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
    }
    // `simd_cast` widens each signed 16-bit lane to 32 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sh_s32(pg.sve_into(), base))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
    unsafe extern "unadjusted" {
        // Loads 2 bytes per vector (nxv2i8) under a 2-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")]
        fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
    }
    // `simd_cast` widens each signed 8-bit lane to 64 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sb_s64(pg.sve_into(), base))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
    unsafe extern "unadjusted" {
        // Loads 2 halfwords per vector (nxv2i16) under a 2-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")]
        fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
    }
    // `simd_cast` widens each signed 16-bit lane to 64 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sh_s64(pg.sve_into(), base))
}
#[doc = "Load 32-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sw))]
pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
    unsafe extern "unadjusted" {
        // Loads 2 words per vector (nxv2i32) under a 2-lane predicate.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")]
        fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
    }
    // `simd_cast` widens each signed 32-bit lane to 64 bits (sign extension).
    crate::intrinsics::simd::simd_cast(_svldnf1sw_s64(pg.sve_into(), base))
}
21561#[doc = "Load 8-bit data and sign-extend, non-faulting"]
21562#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"]
21563#[doc = "## Safety"]
21564#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21565#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21566#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21567#[inline(always)]
21568#[target_feature(enable = "sve")]
21569#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21570#[cfg_attr(test, assert_instr(ldnf1sb))]
21571pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
21572    svldnf1sb_s16(pg, base).as_unsigned()
21573}
21574#[doc = "Load 8-bit data and sign-extend, non-faulting"]
21575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"]
21576#[doc = "## Safety"]
21577#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21578#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21579#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21580#[inline(always)]
21581#[target_feature(enable = "sve")]
21582#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21583#[cfg_attr(test, assert_instr(ldnf1sb))]
21584pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
21585    svldnf1sb_s32(pg, base).as_unsigned()
21586}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
    // Same LDNF1SH instruction as the signed variant; only the destination
    // lane type differs, so delegate and reinterpret the lanes as unsigned.
    svldnf1sh_s32(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
    // Same LDNF1SB instruction as the signed variant; only the destination
    // lane type differs, so delegate and reinterpret the lanes as unsigned.
    svldnf1sb_s64(pg, base).as_unsigned()
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
    // Same LDNF1SH instruction as the signed variant; only the destination
    // lane type differs, so delegate and reinterpret the lanes as unsigned.
    svldnf1sh_s64(pg, base).as_unsigned()
}
#[doc = "Load 32-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sw))]
pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
    // Same LDNF1SW instruction as the signed variant; only the destination
    // lane type differs, so delegate and reinterpret the lanes as unsigned.
    svldnf1sw_s64(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcnth() is the runtime count of 16-bit lanes per vector, and each
    // lane consumes one i8, so the offset is in i8 units.
    svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcntw() is the runtime count of 32-bit lanes per vector, and each
    // lane consumes one i8, so the offset is in i8 units.
    svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcntw() is the runtime count of 32-bit lanes per vector, and each
    // lane consumes one i16, so the offset is in i16 units.
    svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcntd() is the runtime count of 64-bit lanes per vector, and each
    // lane consumes one i8, so the offset is in i8 units.
    svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcntd() is the runtime count of 64-bit lanes per vector, and each
    // lane consumes one i16, so the offset is in i16 units.
    svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sw))]
pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
    // Advance `base` by `vnum` whole vectors' worth of source elements:
    // svcntd() is the runtime count of 64-bit lanes per vector, and each
    // lane consumes one i32, so the offset is in i32 units.
    svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
    // Scale `vnum` by svcnth() (16-bit lanes per vector, one i8 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
    // Scale `vnum` by svcntw() (32-bit lanes per vector, one i8 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
    // Scale `vnum` by svcntw() (32-bit lanes per vector, one i16 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sb))]
pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
    // Scale `vnum` by svcntd() (64-bit lanes per vector, one i8 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 16-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sh))]
pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
    // Scale `vnum` by svcntd() (64-bit lanes per vector, one i16 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 32-bit data and sign-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1sw))]
pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
    // Scale `vnum` by svcntd() (64-bit lanes per vector, one i32 source
    // element each) and delegate to the unsigned base-pointer form.
    svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Load 8-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 8-bit elements, one per
        // 16-bit result lane (nxv8i8).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")]
        fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
    }
    // Reinterpret the loaded lanes as unsigned (nxv8u8) before widening so
    // that `simd_cast` zero-extends; casting from the signed vector would
    // sign-extend instead.
    crate::intrinsics::simd::simd_cast::<nxv8u8, _>(
        _svldnf1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 8-bit elements, one per
        // 32-bit result lane (nxv4i8).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")]
        fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
    }
    // Reinterpret the loaded lanes as unsigned (nxv4u8) before widening so
    // that `simd_cast` zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u8, _>(
        _svldnf1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1h))]
pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 16-bit elements, one per
        // 32-bit result lane (nxv4i16).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")]
        fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
    }
    // Reinterpret the loaded lanes as unsigned (nxv4u16) before widening so
    // that `simd_cast` zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv4u16, _>(
        _svldnf1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 8-bit elements, one per
        // 64-bit result lane (nxv2i8).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")]
        fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
    }
    // Reinterpret the loaded lanes as unsigned (nxv2u8) before widening so
    // that `simd_cast` zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u8, _>(
        _svldnf1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 16-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1h))]
pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 16-bit elements, one per
        // 64-bit result lane (nxv2i16).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")]
        fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
    }
    // Reinterpret the loaded lanes as unsigned (nxv2u16) before widening so
    // that `simd_cast` zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u16, _>(
        _svldnf1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 32-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1w))]
pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
    unsafe extern "unadjusted" {
        // LLVM predicated non-faulting load of 32-bit elements, one per
        // 64-bit result lane (nxv2i32).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")]
        fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
    }
    // Reinterpret the loaded lanes as unsigned (nxv2u32) before widening so
    // that `simd_cast` zero-extends rather than sign-extends.
    crate::intrinsics::simd::simd_cast::<nxv2u32, _>(
        _svldnf1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(),
    )
}
#[doc = "Load 8-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
    // Same LDNF1B instruction as the signed-result variant; delegate and
    // reinterpret the lanes as unsigned.
    svldnf1ub_s16(pg, base).as_unsigned()
}
#[doc = "Load 8-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1b))]
pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
    // Same LDNF1B instruction as the signed-result variant; delegate and
    // reinterpret the lanes as unsigned.
    svldnf1ub_s32(pg, base).as_unsigned()
}
#[doc = "Load 16-bit data and zero-extend, non-faulting"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnf1h))]
pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
    // Same LDNF1H instruction as the signed-result variant; delegate and
    // reinterpret the lanes as unsigned.
    svldnf1uh_s32(pg, base).as_unsigned()
}
21948#[doc = "Load 8-bit data and zero-extend, non-faulting"]
21949#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"]
21950#[doc = "## Safety"]
21951#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21952#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21953#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21954#[inline(always)]
21955#[target_feature(enable = "sve")]
21956#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21957#[cfg_attr(test, assert_instr(ldnf1b))]
21958pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
21959    svldnf1ub_s64(pg, base).as_unsigned()
21960}
21961#[doc = "Load 16-bit data and zero-extend, non-faulting"]
21962#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"]
21963#[doc = "## Safety"]
21964#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21965#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21966#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21967#[inline(always)]
21968#[target_feature(enable = "sve")]
21969#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21970#[cfg_attr(test, assert_instr(ldnf1h))]
21971pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
21972    svldnf1uh_s64(pg, base).as_unsigned()
21973}
21974#[doc = "Load 32-bit data and zero-extend, non-faulting"]
21975#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"]
21976#[doc = "## Safety"]
21977#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21978#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21979#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21980#[inline(always)]
21981#[target_feature(enable = "sve")]
21982#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21983#[cfg_attr(test, assert_instr(ldnf1w))]
21984pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
21985    svldnf1uw_s64(pg, base).as_unsigned()
21986}
21987#[doc = "Load 8-bit data and zero-extend, non-faulting"]
21988#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"]
21989#[doc = "## Safety"]
21990#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
21991#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
21992#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
21993#[inline(always)]
21994#[target_feature(enable = "sve")]
21995#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
21996#[cfg_attr(test, assert_instr(ldnf1b))]
21997pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
21998    svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
21999}
22000#[doc = "Load 8-bit data and zero-extend, non-faulting"]
22001#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"]
22002#[doc = "## Safety"]
22003#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22004#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22005#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22006#[inline(always)]
22007#[target_feature(enable = "sve")]
22008#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22009#[cfg_attr(test, assert_instr(ldnf1b))]
22010pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
22011    svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
22012}
22013#[doc = "Load 16-bit data and zero-extend, non-faulting"]
22014#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"]
22015#[doc = "## Safety"]
22016#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22017#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22018#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22019#[inline(always)]
22020#[target_feature(enable = "sve")]
22021#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22022#[cfg_attr(test, assert_instr(ldnf1h))]
22023pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
22024    svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
22025}
22026#[doc = "Load 8-bit data and zero-extend, non-faulting"]
22027#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"]
22028#[doc = "## Safety"]
22029#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22030#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22031#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22032#[inline(always)]
22033#[target_feature(enable = "sve")]
22034#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22035#[cfg_attr(test, assert_instr(ldnf1b))]
22036pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
22037    svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
22038}
22039#[doc = "Load 16-bit data and zero-extend, non-faulting"]
22040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"]
22041#[doc = "## Safety"]
22042#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22043#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22044#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22045#[inline(always)]
22046#[target_feature(enable = "sve")]
22047#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22048#[cfg_attr(test, assert_instr(ldnf1h))]
22049pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
22050    svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
22051}
22052#[doc = "Load 32-bit data and zero-extend, non-faulting"]
22053#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"]
22054#[doc = "## Safety"]
22055#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22056#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22057#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22058#[inline(always)]
22059#[target_feature(enable = "sve")]
22060#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22061#[cfg_attr(test, assert_instr(ldnf1w))]
22062pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
22063    svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
22064}
22065#[doc = "Load 8-bit data and zero-extend, non-faulting"]
22066#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"]
22067#[doc = "## Safety"]
22068#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22069#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22070#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22071#[inline(always)]
22072#[target_feature(enable = "sve")]
22073#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22074#[cfg_attr(test, assert_instr(ldnf1b))]
22075pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
22076    svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
22077}
22078#[doc = "Load 8-bit data and zero-extend, non-faulting"]
22079#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"]
22080#[doc = "## Safety"]
22081#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22082#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22083#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22084#[inline(always)]
22085#[target_feature(enable = "sve")]
22086#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22087#[cfg_attr(test, assert_instr(ldnf1b))]
22088pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
22089    svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
22090}
22091#[doc = "Load 16-bit data and zero-extend, non-faulting"]
22092#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"]
22093#[doc = "## Safety"]
22094#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22095#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22096#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22097#[inline(always)]
22098#[target_feature(enable = "sve")]
22099#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22100#[cfg_attr(test, assert_instr(ldnf1h))]
22101pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
22102    svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
22103}
22104#[doc = "Load 8-bit data and zero-extend, non-faulting"]
22105#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"]
22106#[doc = "## Safety"]
22107#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22108#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22109#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22110#[inline(always)]
22111#[target_feature(enable = "sve")]
22112#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22113#[cfg_attr(test, assert_instr(ldnf1b))]
22114pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
22115    svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
22116}
22117#[doc = "Load 16-bit data and zero-extend, non-faulting"]
22118#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"]
22119#[doc = "## Safety"]
22120#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22121#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22122#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22123#[inline(always)]
22124#[target_feature(enable = "sve")]
22125#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22126#[cfg_attr(test, assert_instr(ldnf1h))]
22127pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
22128    svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
22129}
22130#[doc = "Load 32-bit data and zero-extend, non-faulting"]
22131#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"]
22132#[doc = "## Safety"]
22133#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
22134#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
22135#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
22136#[inline(always)]
22137#[target_feature(enable = "sve")]
22138#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22139#[cfg_attr(test, assert_instr(ldnf1w))]
22140pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
22141    svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
22142}
22143#[doc = "Unextended load, non-temporal"]
22144#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"]
22145#[doc = "## Safety"]
22146#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22147#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22148#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22149#[inline(always)]
22150#[target_feature(enable = "sve")]
22151#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22152#[cfg_attr(test, assert_instr(ldnt1w))]
22153pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
22154    unsafe extern "unadjusted" {
22155        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")]
22156        fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
22157    }
22158    _svldnt1_f32(pg.sve_into(), base)
22159}
22160#[doc = "Unextended load, non-temporal"]
22161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"]
22162#[doc = "## Safety"]
22163#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22164#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22165#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22166#[inline(always)]
22167#[target_feature(enable = "sve")]
22168#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22169#[cfg_attr(test, assert_instr(ldnt1d))]
22170pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
22171    unsafe extern "unadjusted" {
22172        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")]
22173        fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
22174    }
22175    _svldnt1_f64(pg.sve_into(), base)
22176}
22177#[doc = "Unextended load, non-temporal"]
22178#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"]
22179#[doc = "## Safety"]
22180#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22181#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22182#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22183#[inline(always)]
22184#[target_feature(enable = "sve")]
22185#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22186#[cfg_attr(test, assert_instr(ldnt1b))]
22187pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
22188    unsafe extern "unadjusted" {
22189        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")]
22190        fn _svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
22191    }
22192    _svldnt1_s8(pg, base)
22193}
22194#[doc = "Unextended load, non-temporal"]
22195#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"]
22196#[doc = "## Safety"]
22197#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22198#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22199#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22200#[inline(always)]
22201#[target_feature(enable = "sve")]
22202#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22203#[cfg_attr(test, assert_instr(ldnt1h))]
22204pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
22205    unsafe extern "unadjusted" {
22206        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")]
22207        fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
22208    }
22209    _svldnt1_s16(pg.sve_into(), base)
22210}
22211#[doc = "Unextended load, non-temporal"]
22212#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"]
22213#[doc = "## Safety"]
22214#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22215#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22216#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22217#[inline(always)]
22218#[target_feature(enable = "sve")]
22219#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22220#[cfg_attr(test, assert_instr(ldnt1w))]
22221pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
22222    unsafe extern "unadjusted" {
22223        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")]
22224        fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
22225    }
22226    _svldnt1_s32(pg.sve_into(), base)
22227}
22228#[doc = "Unextended load, non-temporal"]
22229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"]
22230#[doc = "## Safety"]
22231#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22232#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22233#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22234#[inline(always)]
22235#[target_feature(enable = "sve")]
22236#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22237#[cfg_attr(test, assert_instr(ldnt1d))]
22238pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
22239    unsafe extern "unadjusted" {
22240        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")]
22241        fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
22242    }
22243    _svldnt1_s64(pg.sve_into(), base)
22244}
22245#[doc = "Unextended load, non-temporal"]
22246#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"]
22247#[doc = "## Safety"]
22248#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22249#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22250#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22251#[inline(always)]
22252#[target_feature(enable = "sve")]
22253#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22254#[cfg_attr(test, assert_instr(ldnt1b))]
22255pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
22256    svldnt1_s8(pg, base.as_signed()).as_unsigned()
22257}
22258#[doc = "Unextended load, non-temporal"]
22259#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"]
22260#[doc = "## Safety"]
22261#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22262#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22263#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22264#[inline(always)]
22265#[target_feature(enable = "sve")]
22266#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22267#[cfg_attr(test, assert_instr(ldnt1h))]
22268pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
22269    svldnt1_s16(pg, base.as_signed()).as_unsigned()
22270}
22271#[doc = "Unextended load, non-temporal"]
22272#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"]
22273#[doc = "## Safety"]
22274#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22275#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22276#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22277#[inline(always)]
22278#[target_feature(enable = "sve")]
22279#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22280#[cfg_attr(test, assert_instr(ldnt1w))]
22281pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
22282    svldnt1_s32(pg, base.as_signed()).as_unsigned()
22283}
22284#[doc = "Unextended load, non-temporal"]
22285#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"]
22286#[doc = "## Safety"]
22287#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
22288#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
22289#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
22290#[inline(always)]
22291#[target_feature(enable = "sve")]
22292#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
22293#[cfg_attr(test, assert_instr(ldnt1d))]
22294pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
22295    svldnt1_s64(pg, base.as_signed()).as_unsigned()
22296}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // `vnum` counts whole vectors, not elements: svcntw() is the number of
    // 32-bit elements in one vector (runtime VL), so the element-typed
    // pointer offset is svcntw() * vnum.
    svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
    // `vnum` in whole vectors; svcntd() = 64-bit elements per vector.
    svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
    // `vnum` in whole vectors; svcntb() = 8-bit elements per vector.
    svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
    // `vnum` in whole vectors; svcnth() = 16-bit elements per vector.
    svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
    // `vnum` in whole vectors; svcntw() = 32-bit elements per vector.
    svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
    // `vnum` in whole vectors; svcntd() = 64-bit elements per vector.
    svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1b))]
pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
    // `vnum` in whole vectors; svcntb() = 8-bit elements per vector.
    svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1h))]
pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
    // `vnum` in whole vectors; svcnth() = 16-bit elements per vector.
    svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1w))]
pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
    // `vnum` in whole vectors; svcntw() = 32-bit elements per vector.
    svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Unextended load, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ldnt1d))]
pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
    // `vnum` in whole vectors; svcntd() = 64-bit elements per vector.
    svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntw))]
pub fn svlen_f32(_op: svfloat32_t) -> u64 {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // The vector operand only selects the element width; its value is
    // ignored, and the result is the runtime per-vector 32-bit lane count.
    svcntw()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntd))]
pub fn svlen_f64(_op: svfloat64_t) -> u64 {
    // Operand ignored; 64-bit lane count of a full vector.
    svcntd()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rdvl))]
pub fn svlen_s8(_op: svint8_t) -> u64 {
    // Operand ignored; 8-bit lane count. The byte count equals the vector
    // length in bytes, hence the `rdvl` codegen expectation here rather
    // than `cntb`.
    svcntb()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnth))]
pub fn svlen_s16(_op: svint16_t) -> u64 {
    // Operand ignored; 16-bit lane count of a full vector.
    svcnth()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntw))]
pub fn svlen_s32(_op: svint32_t) -> u64 {
    // Operand ignored; 32-bit lane count of a full vector.
    svcntw()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntd))]
pub fn svlen_s64(_op: svint64_t) -> u64 {
    // Operand ignored; 64-bit lane count of a full vector.
    svcntd()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rdvl))]
pub fn svlen_u8(_op: svuint8_t) -> u64 {
    // Operand ignored; 8-bit lane count (compiles to `rdvl`, see svlen_s8).
    svcntb()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cnth))]
pub fn svlen_u16(_op: svuint16_t) -> u64 {
    // Operand ignored; 16-bit lane count of a full vector.
    svcnth()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntw))]
pub fn svlen_u32(_op: svuint32_t) -> u64 {
    // Operand ignored; 32-bit lane count of a full vector.
    svcntw()
}
#[doc = "Count the number of elements in a full vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(cntd))]
pub fn svlen_u64(_op: svuint64_t) -> u64 {
    // Operand ignored; 64-bit lane count of a full vector.
    svcntd()
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // `_m` (merging): inactive lanes keep op1's value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")]
        // The LLVM intrinsic is declared over signed vectors only, hence
        // the `as_signed()` reinterpretation of the shift amounts below.
        fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // For 8-bit elements the full 16-lane predicate type is `svbool_t`
    // itself, so (unlike the 16/32/64-bit variants) no `sve_into()`
    // predicate conversion is needed.
    unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // `_n` (scalar) form: splat the shift amount, then use the vector form.
    svlsl_s8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // `_x` leaves inactive lanes unspecified, so delegating to the merging
    // form is a valid (and the generated) implementation.
    svlsl_s8_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
    // `_z` (zeroing): pre-select zeros into op1's inactive lanes so the
    // merging shift leaves them zero in the result.
    svlsl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // `_m` (merging): inactive lanes keep op1's value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")]
        // LLVM intrinsic takes signed vectors and an 8-lane predicate.
        fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // `sve_into()` converts the all-lanes predicate to the 16-bit-element
    // (8-lane) predicate type; `as_signed()` matches the intrinsic's
    // signed signature without changing bits.
    unsafe { _svlsl_s16_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // `_x` (don't-care inactive lanes) implemented via the merging form.
    svlsl_s16_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
    // `_z` (zeroing): zero op1's inactive lanes first, then merge-shift.
    svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // `_m` (merging): inactive lanes keep op1's value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")]
        // LLVM intrinsic takes signed vectors and a 4-lane predicate.
        fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // Predicate converted to the 32-bit-element form; shift amounts
    // bit-reinterpreted as signed to match the intrinsic signature.
    unsafe { _svlsl_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // `_x` (don't-care inactive lanes) implemented via the merging form.
    svlsl_s32_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
    // `_z` (zeroing): zero op1's inactive lanes first, then merge-shift.
    svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // `_m` (merging): inactive lanes keep op1's value.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")]
        // LLVM intrinsic takes signed vectors and a 2-lane predicate.
        fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // Predicate converted to the 64-bit-element form; shift amounts
    // bit-reinterpreted as signed to match the intrinsic signature.
    unsafe { _svlsl_s64_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // `_x` (don't-care inactive lanes) implemented via the merging form.
    svlsl_s64_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
    // `_z` (zeroing): zero op1's inactive lanes first, then merge-shift.
    svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
    // Scalar shift amount splatted to a vector.
    svlsl_s64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // Logical shift is sign-agnostic: reuse the signed binding and
    // bit-reinterpret the operand/result (no value change).
    unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_x` (don't-care inactive lanes) implemented via the merging form.
    svlsl_u8_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_z` (zeroing): zero op1's inactive lanes first, then merge-shift.
    svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // NOTE: generated file — code must stay byte-identical; comments only.
    // Logical shift is sign-agnostic: reuse the signed binding via a
    // bitwise reinterpretation of operand and result.
    unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_x` (don't-care inactive lanes) implemented via the merging form.
    svlsl_u16_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_z` (zeroing): zero op1's inactive lanes first, then merge-shift.
    svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar shift amount splatted to a vector.
    svlsl_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Merging form (_m): lanes where `pg` is inactive keep their value from `op1`.
// Forwards to the signed variant; `as_signed`/`as_unsigned` reinterpret the
// lane bit patterns only, so the shift result is unchanged.
pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats `op2` across all lanes, then merges.
pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svlsl_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): inactive lanes are unspecified by the ACLE contract;
// this implementation simply reuses the merging form.
pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svlsl_u32_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svlsl_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): inactive result lanes are zero, obtained by zeroing the
// inactive lanes of `op1` (via svsel against a zero splat) before the merge.
pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svlsl_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Merging form (_m): lanes where `pg` is inactive keep their value from `op1`.
// Forwards to the signed variant; `as_signed`/`as_unsigned` reinterpret the
// lane bit patterns only, so the shift result is unchanged.
pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats `op2` across all lanes, then merges.
pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svlsl_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): inactive lanes are unspecified by the ACLE contract;
// this implementation simply reuses the merging form.
pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svlsl_u64_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svlsl_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): inactive result lanes are zero, obtained by zeroing the
// inactive lanes of `op1` (via svsel against a zero splat) before the merge.
pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svlsl_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form: each i8 lane of `op1` is shifted by a 64-bit amount
// taken from `op2`; inactive lanes keep `op1`'s value.
pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8"
        )]
        fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
    }
    // SAFETY: calls the matching LLVM SVE intrinsic with ABI-compatible
    // arguments. For nxv16i8 the generic `svbool_t` predicate is passed
    // unchanged; `as_signed` only reinterprets the shift-amount bits.
    unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    svlsl_wide_s8_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
    svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
    svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form: each i16 lane of `op1` is shifted by a 64-bit amount
// taken from `op2`; inactive lanes keep `op1`'s value.
pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16"
        )]
        fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
    }
    // SAFETY: calls the matching LLVM SVE intrinsic; `sve_into` converts the
    // generic predicate to the `svbool8_t` form the nxv8i16 intrinsic expects,
    // and `as_signed` only reinterprets the shift-amount bits.
    unsafe { _svlsl_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    svlsl_wide_s16_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
    svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
    svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form: each i32 lane of `op1` is shifted by a 64-bit amount
// taken from `op2`; inactive lanes keep `op1`'s value.
pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32"
        )]
        fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
    }
    // SAFETY: calls the matching LLVM SVE intrinsic; `sve_into` converts the
    // generic predicate to the `svbool4_t` form the nxv4i32 intrinsic expects,
    // and `as_signed` only reinterprets the shift-amount bits.
    unsafe { _svlsl_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    svlsl_wide_s32_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
    svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
    svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form for u8: forwards to the signed variant; `as_signed`/
// `as_unsigned` reinterpret lane bits only, so the shift result is unchanged.
pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    svlsl_wide_u8_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    svlsl_wide_u8_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form for u16: forwards to the signed variant; `as_signed`/
// `as_unsigned` reinterpret lane bits only, so the shift result is unchanged.
pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    svlsl_wide_u16_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Wide merging form for u32: forwards to the signed variant; `as_signed`/
// `as_unsigned` reinterpret lane bits only, so the shift result is unchanged.
pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() }
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift form (_n): splats the 64-bit amount, then merges.
pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    svlsl_wide_u32_m(pg, op1, op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Logical shift left"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsl))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Merging form (_m): lanes where `pg` is inactive keep their value from `op1`.
pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")]
        fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: calls the matching LLVM SVE intrinsic with ABI-compatible
    // arguments. For nxv16i8 the generic `svbool_t` predicate is passed
    // unchanged; `as_signed`/`as_unsigned` only reinterpret lane bits
    // (LSR itself is defined on the unsigned bit pattern).
    unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift form (_n): splats `op2` across all lanes, then merges.
pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svlsr_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svlsr_u8_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svlsr_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svlsr_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Merging form (_m): lanes where `pg` is inactive keep their value from `op1`.
pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")]
        fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: calls the matching LLVM SVE intrinsic; `sve_into` converts the
    // generic predicate to the `svbool8_t` form the nxv8i16 intrinsic expects,
    // and `as_signed`/`as_unsigned` only reinterpret lane bits.
    unsafe { _svlsr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift form (_n): splats `op2` across all lanes, then merges.
pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svlsr_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// "Don't care" form (_x): reuses the merging implementation.
pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svlsr_u16_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift "don't care" form: splat, then delegate to _x.
pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svlsr_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Zeroing form (_z): zeroes inactive lanes of `op1` before the merging shift.
pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
// Scalar-shift zeroing form: splat, then delegate to _z.
pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svlsr_u16_z(pg, op1, svdup_n_u16(op2))
}
23426#[doc = "Logical shift right"]
23427#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"]
23428#[inline(always)]
23429#[target_feature(enable = "sve")]
23430#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
23431#[cfg_attr(test, assert_instr(lsr))]
23432pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
23433    unsafe extern "unadjusted" {
23434        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")]
23435        fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
23436    }
23437    unsafe { _svlsr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
23438}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svlsr_u32_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing predication: force inactive lanes of `op1` to 0 via `svsel`,
    // then reuse the merging form.
    svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Merging predication: lowered directly to the LLVM SVE intrinsic declared below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")]
        fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // The intrinsic is declared over signed vectors: reinterpret the unsigned
    // operands/result (bit-pattern no-ops) and convert the predicate to its 2-lane form.
    unsafe { _svlsr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svlsr_u64_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing predication: force inactive lanes of `op1` to 0 via `svsel`,
    // then reuse the merging form.
    svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar-operand variant: broadcast `op2` to every lane and defer to the vector form.
    svlsr_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    // Wide variant: shift amounts come from 64-bit elements (`op2`).
    // Merging predication, lowered directly to the LLVM SVE intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8"
        )]
        fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
    }
    // The b8 predicate already matches the intrinsic's `svbool_t`, so no conversion is
    // needed; only the unsigned operands/result are reinterpreted as signed.
    unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svlsr_wide_u8_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
    // Zeroing predication: force inactive lanes of `op1` to 0 via `svsel`,
    // then reuse the merging form.
    svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    // Wide variant: shift amounts come from 64-bit elements (`op2`).
    // Merging predication, lowered directly to the LLVM SVE intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16"
        )]
        fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
    }
    // Reinterpret the unsigned operands/result as signed (bit-pattern no-ops)
    // and convert the predicate to its 8-lane form.
    unsafe { _svlsr_wide_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svlsr_wide_u16_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
    // Zeroing predication: force inactive lanes of `op1` to 0 via `svsel`,
    // then reuse the merging form.
    svlsr_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    // Wide variant: shift amounts come from 64-bit elements (`op2`).
    // Merging predication, lowered directly to the LLVM SVE intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32"
        )]
        fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
    }
    // Reinterpret the unsigned operands/result as signed (bit-pattern no-ops)
    // and convert the predicate to its 4-lane form.
    unsafe { _svlsr_wide_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svlsr_wide_u32_m(pg, op1, op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
    // Zeroing predication: force inactive lanes of `op1` to 0 via `svsel`,
    // then reuse the merging form.
    svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Logical shift right"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(lsr))]
pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
    // Scalar-operand variant: broadcast the 64-bit shift amount and defer to the vector form.
    svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Merging predication: lowered directly to the LLVM SVE `fmad` intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")]
        fn _svmad_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // Only the predicate needs conversion (to its 4-lane form); the float operands pass through.
    unsafe { _svmad_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svmad_f32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Zeroing predication: force inactive lanes of `op1` (the multiplicand) to 0.0
    // via `svsel`, then reuse the merging form.
    svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Merging predication: lowered directly to the LLVM SVE `fmad` intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")]
        fn _svmad_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // Only the predicate needs conversion (to its 2-lane form); the float operands pass through.
    unsafe { _svmad_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svmad_f64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Zeroing predication: force inactive lanes of `op1` (the multiplicand) to 0.0
    // via `svsel`, then reuse the merging form.
    svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmad))]
pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Merging predication: lowered directly to the LLVM SVE `mad` intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")]
        fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // The b8 predicate already matches the intrinsic's `svbool_t`; no conversions needed.
    unsafe { _svmad_s8_m(pg, op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s8_m(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svmad_s8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s8_x(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Zeroing predication: force inactive lanes of `op1` (the multiplicand) to 0
    // via `svsel`, then reuse the merging form.
    svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s8_z(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Merging predication: lowered directly to the LLVM SVE `mad` intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")]
        fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
            -> svint16_t;
    }
    // Only the predicate needs conversion (to its 8-lane form).
    unsafe { _svmad_s16_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s16_m(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svmad_s16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s16_x(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Zeroing predication: force inactive lanes of `op1` (the multiplicand) to 0
    // via `svsel`, then reuse the merging form.
    svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s16_z(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Merging predication: lowered directly to the LLVM SVE `mad` intrinsic below.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")]
        fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
            -> svint32_t;
    }
    // Only the predicate needs conversion (to its 4-lane form).
    unsafe { _svmad_s32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s32_m(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // "Don't care" predication: this implementation simply reuses the merging form.
    svmad_s32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s32_x(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Zeroing predication: force inactive lanes of `op1` (the multiplicand) to 0
    // via `svsel`, then reuse the merging form.
    svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    // Scalar-addend variant: broadcast `op3` to every lane and defer to the vector form.
    svmad_s32_z(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv2i64 = scalable
    // vector of 64-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")]
        fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
            -> svint64_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 64-bit-element predicate type via `sve_into`.
    unsafe { _svmad_s64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_s64_m(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmad_s64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_s64_x(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_s64_z(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // Wrapping multiply-add produces the same bits for signed and unsigned
    // lanes, so the signed implementation is reused via lane-reinterpreting
    // casts. SAFETY: as_signed/as_unsigned only reinterpret the lane type.
    unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u8_m(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmad_u8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u8_x(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmad_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u8_z(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // Wrapping multiply-add produces the same bits for signed and unsigned
    // lanes, so the signed implementation is reused via lane-reinterpreting
    // casts. SAFETY: as_signed/as_unsigned only reinterpret the lane type.
    unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u16_m(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmad_u16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u16_x(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u16_z(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // Wrapping multiply-add produces the same bits for signed and unsigned
    // lanes, so the signed implementation is reused via lane-reinterpreting
    // casts. SAFETY: as_signed/as_unsigned only reinterpret the lane type.
    unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u32_m(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmad_u32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u32_x(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u32_z(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // Wrapping multiply-add produces the same bits for signed and unsigned
    // lanes, so the signed implementation is reused via lane-reinterpreting
    // casts. SAFETY: as_signed/as_unsigned only reinterpret the lane type.
    unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u64_m(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmad_u64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u64_x(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
}
#[doc = "Multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mad))]
pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmad_u64_z(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv4f32 = scalable
    // vector of 32-bit float lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")]
        fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 32-bit-element predicate type via `sve_into`.
    unsafe { _svmax_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_f32_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv2f64 = scalable
    // vector of 64-bit float lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")]
        fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 64-bit-element predicate type via `sve_into`.
    unsafe { _svmax_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_f64_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmax))]
pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv16i8 = scalable
    // vector of 8-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")]
        fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the call matches the declaration above; for 8-bit elements the
    // intrinsic takes the generic predicate type directly, so no conversion
    // is needed.
    unsafe { _svmax_s8_m(pg, op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_s8_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv8i16 = scalable
    // vector of 16-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")]
        fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 16-bit-element predicate type via `sve_into`.
    unsafe { _svmax_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_s16_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv4i32 = scalable
    // vector of 32-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")]
        fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 32-bit-element predicate type via `sve_into`.
    unsafe { _svmax_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_s32_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (_z) predication: inactive lanes of op1 are zeroed via svsel
    // before applying the merging form.
    svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Declaration of the underlying LLVM SVE intrinsic (nxv2i64 = scalable
    // vector of 64-bit lanes).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")]
        fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the call matches the declaration above; the generic predicate is
    // converted to the 64-bit-element predicate type via `sve_into`.
    unsafe { _svmax_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat the scalar operand across all lanes, then defer to the vector form.
    svmax_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smax))]
pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (_x) predication: inactive lanes may hold any value, so the
    // merging (_m) form is reused as-is.
    svmax_s64_m(pg, op1, op2)
}
24653#[doc = "Maximum"]
24654#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"]
24655#[inline(always)]
24656#[target_feature(enable = "sve")]
24657#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
24658#[cfg_attr(test, assert_instr(smax))]
24659pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
24660    svmax_s64_x(pg, op1, svdup_n_s64(op2))
24661}
24662#[doc = "Maximum"]
24663#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"]
24664#[inline(always)]
24665#[target_feature(enable = "sve")]
24666#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
24667#[cfg_attr(test, assert_instr(smax))]
24668pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
24669    svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
24670}
24671#[doc = "Maximum"]
24672#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"]
24673#[inline(always)]
24674#[target_feature(enable = "sve")]
24675#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
24676#[cfg_attr(test, assert_instr(smax))]
24677pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
24678    svmax_s64_z(pg, op1, svdup_n_s64(op2))
24679}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Raw binding to the LLVM `umax` intrinsic. The binding is declared on
    // signed vector types, so operands/result go through `as_signed`/`as_unsigned`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")]
        fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI call into the intrinsic. 8-bit elements use the full 16-lane
    // predicate directly, so no predicate conversion is needed here.
    unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmax_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmax_u8_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmax_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmax_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Raw binding to the LLVM `umax` intrinsic for 16-bit elements; declared on
    // signed vector types, hence the `as_signed`/`as_unsigned` round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")]
        fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool8_t` form expected for nxv8i16 operands.
    unsafe { _svmax_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmax_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmax_u16_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmax_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmax_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Raw binding to the LLVM `umax` intrinsic for 32-bit elements; declared on
    // signed vector types, hence the `as_signed`/`as_unsigned` round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")]
        fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool4_t` form expected for nxv4i32 operands.
    unsafe { _svmax_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmax_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmax_u32_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmax_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmax_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Raw binding to the LLVM `umax` intrinsic for 64-bit elements; declared on
    // signed vector types, hence the `as_signed`/`as_unsigned` round-trip.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")]
        fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool2_t` form expected for nxv2i64 operands.
    unsafe { _svmax_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmax_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmax_u64_m(pg, op1, op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmax_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umax))]
pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmax_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw binding to the LLVM `fmaxnm` intrinsic for 32-bit floats;
    // `extern "unadjusted"` keeps the scalable-vector call ABI untouched.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")]
        fn _svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool4_t` form expected for nxv4f32 operands.
    unsafe { _svmaxnm_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmaxnm_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmaxnm_f32_m(pg, op1, op2)
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmaxnm_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmaxnm_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Raw binding to the LLVM `fmaxnm` intrinsic for 64-bit floats;
    // `extern "unadjusted"` keeps the scalable-vector call ABI untouched.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")]
        fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool2_t` form expected for nxv2f64 operands.
    unsafe { _svmaxnm_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmaxnm_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmaxnm_f64_m(pg, op1, op2)
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmaxnm_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Maximum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnm))]
pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmaxnm_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Maximum number reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    // Raw binding to the LLVM `fmaxnmv` reduction intrinsic for 32-bit floats.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32"
        )]
        fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool4_t` form expected for nxv4f32 operands.
    unsafe { _svmaxnmv_f32(pg.sve_into(), op) }
}
#[doc = "Maximum number reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxnmv))]
pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    // Raw binding to the LLVM `fmaxnmv` reduction intrinsic for 64-bit floats.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64"
        )]
        fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool2_t` form expected for nxv2f64 operands.
    unsafe { _svmaxnmv_f64(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    // Raw binding to the LLVM `fmaxv` reduction intrinsic for 32-bit floats.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")]
        fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool4_t` form expected for nxv4f32 operands.
    unsafe { _svmaxv_f32(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmaxv))]
pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    // Raw binding to the LLVM `fmaxv` reduction intrinsic for 64-bit floats.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")]
        fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool2_t` form expected for nxv2f64 operands.
    unsafe { _svmaxv_f64(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 {
    // Raw binding to the LLVM `smaxv` reduction intrinsic for 8-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")]
        fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // SAFETY: FFI call into the intrinsic. 8-bit elements use the full 16-lane
    // predicate directly, so no predicate conversion is needed.
    unsafe { _svmaxv_s8(pg, op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 {
    // Raw binding to the LLVM `smaxv` reduction intrinsic for 16-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")]
        fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`.
    unsafe { _svmaxv_s16(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 {
    // Raw binding to the LLVM `smaxv` reduction intrinsic for 32-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")]
        fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`.
    unsafe { _svmaxv_s32(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smaxv))]
pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    // Raw binding to the LLVM `smaxv` reduction intrinsic for 64-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")]
        fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`.
    unsafe { _svmaxv_s64(pg.sve_into(), op) }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    // Raw binding to the LLVM `umaxv` reduction intrinsic; declared on signed
    // types, so the input and scalar result are reinterpreted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")]
        fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // SAFETY: FFI call into the intrinsic. 8-bit elements use the full 16-lane
    // predicate directly, so no predicate conversion is needed.
    unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    // Raw binding to the LLVM `umaxv` reduction intrinsic for 16-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")]
        fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`,
    // operand and result reinterpreted between unsigned and signed types.
    unsafe { _svmaxv_u16(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    // Raw binding to the LLVM `umaxv` reduction intrinsic for 32-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")]
        fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`,
    // operand and result reinterpreted between unsigned and signed types.
    unsafe { _svmaxv_u32(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Maximum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umaxv))]
pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    // Raw binding to the LLVM `umaxv` reduction intrinsic for 64-bit elements.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")]
        fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // SAFETY: FFI call into the intrinsic; predicate converted via `sve_into`,
    // operand and result reinterpreted between unsigned and signed types.
    unsafe { _svmaxv_u64(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw binding to the LLVM `fmin` intrinsic for 32-bit floats;
    // `extern "unadjusted"` keeps the scalable-vector call ABI untouched.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")]
        fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool4_t` form expected for nxv4f32 operands.
    unsafe { _svmin_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmin_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmin_f32_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmin_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_z` form.
    svmin_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Raw binding to the LLVM `fmin` intrinsic for 64-bit floats;
    // `extern "unadjusted"` keeps the scalable-vector call ABI untouched.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")]
        fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI call into the intrinsic. `sve_into` converts the generic
    // predicate to the `svbool2_t` form expected for nxv2f64 operands.
    unsafe { _svmin_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_m` form.
    svmin_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // The `_x` variant simply delegates to the merging implementation.
    svmin_f64_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    // Broadcast the scalar `op2` to every lane, then reuse the vector `_x` form.
    svmin_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmin))]
pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Zeroing variant: inactive lanes of `op1` are cleared (svsel against a
    // zero vector) before reusing the merging form.
    svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
25297#[doc = "Minimum"]
25298#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"]
25299#[inline(always)]
25300#[target_feature(enable = "sve")]
25301#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
25302#[cfg_attr(test, assert_instr(fmin))]
25303pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
25304    svmin_f64_z(pg, op1, svdup_n_f64(op2))
25305}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Merging predication: lanes where `pg` is false keep `op1`. For 8-bit
// elements (16 lanes) the predicate already has the right lane count, so
// `pg` is passed through without conversion.
pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")]
        fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_s8_m(pg, op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmin_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmin_s8_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmin_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmin_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Merging predication: lanes where `pg` is false keep `op1`. `sve_into()`
// narrows the all-lanes `svbool_t` predicate to the 8-lane `svbool8_t` form
// that the nxv8i16 intrinsic expects.
pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")]
        fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmin_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmin_s16_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmin_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmin_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Merging predication: lanes where `pg` is false keep `op1`. `sve_into()`
// narrows the all-lanes `svbool_t` predicate to the 4-lane `svbool4_t` form
// that the nxv4i32 intrinsic expects.
pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")]
        fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmin_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmin_s32_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmin_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmin_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Merging predication: lanes where `pg` is false keep `op1`. `sve_into()`
// narrows the all-lanes `svbool_t` predicate to the 2-lane `svbool2_t` form
// that the nxv2i64 intrinsic expects.
pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv2i64")]
        fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmin_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svmin_s64_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmin_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svmin_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Merging predication, unsigned. The LLVM boundary is declared in terms of
// signed vectors, so the operands are bit-reinterpreted (`as_signed`), the
// `umin` intrinsic (which is what defines the unsigned semantics) is called,
// and the result is reinterpreted back (`as_unsigned`). For 8-bit elements
// the 16-lane predicate is passed through unchanged.
pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")]
        fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svmin_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svmin_u8_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svmin_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svmin_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Merging predication, unsigned: bit-reinterpret to the signed vector types
// the LLVM boundary is declared with, call the `umin` intrinsic (which fixes
// the unsigned semantics), reinterpret back. `sve_into()` narrows the
// predicate to the 8-lane form for nxv8i16.
pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")]
        fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svmin_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svmin_u16_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svmin_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svmin_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Merging predication, unsigned: bit-reinterpret to signed, call the `umin`
// intrinsic (which fixes the unsigned semantics), reinterpret back.
// `sve_into()` narrows the predicate to the 4-lane form for nxv4i32.
pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")]
        fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svmin_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svmin_u32_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svmin_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svmin_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Merging predication, unsigned: bit-reinterpret to signed, call the `umin`
// intrinsic (which fixes the unsigned semantics), reinterpret back.
// `sve_into()` narrows the predicate to the 2-lane form for nxv2i64.
pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")]
        fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svmin_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svmin_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// "Don't care" predication: implemented via the merging form.
pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svmin_u64_m(pg, op1, op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the "don't care" variant.
pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svmin_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Zeroing predication: inactive `op1` lanes are forced to 0 first, so
// inactive result lanes are 0.
pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Minimum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umin))]
// Vector-scalar form of the zeroing variant.
pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svmin_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Merging predication. NaN handling follows the FMINNM instruction ("minimum
// number" — see the Arm docs linked above); this wrapper only forwards to the
// LLVM intrinsic. `sve_into()` narrows the predicate to 4 lanes for nxv4f32.
pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")]
        fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svminnm_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svminnm_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// "Don't care" predication: implemented via the merging form.
pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svminnm_f32_m(pg, op1, op2)
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form of the "don't care" variant.
pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svminnm_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Zeroing predication: inactive `op1` lanes are forced to 0.0 first, so
// inactive result lanes are 0.0.
pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form of the zeroing variant.
pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svminnm_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Merging predication. NaN handling follows the FMINNM instruction ("minimum
// number" — see the Arm docs linked above). `sve_into()` narrows the
// predicate to 2 lanes for nxv2f64.
pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")]
        fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI call to the LLVM SVE intrinsic; `#[target_feature(enable = "sve")]`
    // guarantees the instruction is available at this point.
    unsafe { _svminnm_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form: broadcast `op2`, then defer to the vector variant.
pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svminnm_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// "Don't care" predication: implemented via the merging form.
pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svminnm_f64_m(pg, op1, op2)
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form of the "don't care" variant.
pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svminnm_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Zeroing predication: inactive `op1` lanes are forced to 0.0 first, so
// inactive result lanes are 0.0.
pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Minimum number"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnm))]
// Vector-scalar form of the zeroing variant.
pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svminnm_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Minimum number reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    // Direct binding to the LLVM SVE intrinsic; `nxv4f32` selects the
    // 32-bit float element width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fminnmv.nxv4f32"
        )]
        fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // The generic predicate is converted to the 4-lane form the intrinsic expects.
    unsafe { _svminnmv_f32(pg.sve_into(), op) }
}
#[doc = "Minimum number reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminnmv))]
pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    // Direct binding to the LLVM SVE intrinsic; `nxv2f64` selects the
    // 64-bit float element width.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fminnmv.nxv2f64"
        )]
        fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    // The generic predicate is converted to the 2-lane form the intrinsic expects.
    unsafe { _svminnmv_f64(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
    // Direct binding to the LLVM SVE intrinsic for f32 elements (nxv4f32).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")]
        fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
    }
    // Predicate converted to the 4-lane form expected by the intrinsic.
    unsafe { _svminv_f32(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fminv))]
pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
    // Direct binding to the LLVM SVE intrinsic for f64 elements (nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")]
        fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
    }
    // Predicate converted to the 2-lane form expected by the intrinsic.
    unsafe { _svminv_f64(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 {
    // Direct binding to the LLVM SVE signed-min intrinsic for i8 (nxv16i8).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")]
        fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // 8-bit elements use the full-width predicate, so no conversion is needed.
    unsafe { _svminv_s8(pg, op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 {
    // Direct binding to the LLVM SVE signed-min intrinsic for i16 (nxv8i16).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")]
        fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // Predicate converted to the 8-lane form expected by the intrinsic.
    unsafe { _svminv_s16(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 {
    // Direct binding to the LLVM SVE signed-min intrinsic for i32 (nxv4i32).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")]
        fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // Predicate converted to the 4-lane form expected by the intrinsic.
    unsafe { _svminv_s32(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sminv))]
pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    // Direct binding to the LLVM SVE signed-min intrinsic for i64 (nxv2i64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")]
        fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // Predicate converted to the 2-lane form expected by the intrinsic.
    unsafe { _svminv_s64(pg.sve_into(), op) }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    // Binding to the LLVM SVE unsigned-min intrinsic (uminv). The FFI
    // signature is declared in signed terms, so the input is bit-cast to
    // signed and the result cast back; the operation itself is unsigned.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")]
        fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // 8-bit elements use the full-width predicate, so no conversion is needed.
    unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    // Binding to the LLVM SVE unsigned-min intrinsic (uminv); signed-typed
    // FFI signature, with bit-casts on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")]
        fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // Predicate converted to the 8-lane form expected by the intrinsic.
    unsafe { _svminv_u16(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    // Binding to the LLVM SVE unsigned-min intrinsic (uminv); signed-typed
    // FFI signature, with bit-casts on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")]
        fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // Predicate converted to the 4-lane form expected by the intrinsic.
    unsafe { _svminv_u32(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Minimum reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uminv))]
pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    // Binding to the LLVM SVE unsigned-min intrinsic (uminv); signed-typed
    // FFI signature, with bit-casts on the way in and out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")]
        fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // Predicate converted to the 2-lane form expected by the intrinsic.
    unsafe { _svminv_u64(pg.sve_into(), op.as_signed()).as_unsigned() }
}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn svmla_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Direct binding to the LLVM SVE fused multiply-add intrinsic for f32
    // elements (nxv4f32). Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")]
        fn _svmla_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // Predicate converted to the 4-lane form expected by the intrinsic.
    unsafe { _svmla_f32_m(pg.sve_into(), op1, op2, op3) }
}
26071#[doc = "Multiply-add, addend first"]
26072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"]
26073#[inline(always)]
26074#[target_feature(enable = "sve")]
26075#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26076#[cfg_attr(test, assert_instr(fmla))]
26077pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26078    svmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
26079}
26080#[doc = "Multiply-add, addend first"]
26081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"]
26082#[inline(always)]
26083#[target_feature(enable = "sve")]
26084#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26085#[cfg_attr(test, assert_instr(fmla))]
26086pub fn svmla_f32_x(
26087    pg: svbool_t,
26088    op1: svfloat32_t,
26089    op2: svfloat32_t,
26090    op3: svfloat32_t,
26091) -> svfloat32_t {
26092    svmla_f32_m(pg, op1, op2, op3)
26093}
26094#[doc = "Multiply-add, addend first"]
26095#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"]
26096#[inline(always)]
26097#[target_feature(enable = "sve")]
26098#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26099#[cfg_attr(test, assert_instr(fmla))]
26100pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26101    svmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
26102}
26103#[doc = "Multiply-add, addend first"]
26104#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"]
26105#[inline(always)]
26106#[target_feature(enable = "sve")]
26107#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26108#[cfg_attr(test, assert_instr(fmla))]
26109pub fn svmla_f32_z(
26110    pg: svbool_t,
26111    op1: svfloat32_t,
26112    op2: svfloat32_t,
26113    op3: svfloat32_t,
26114) -> svfloat32_t {
26115    svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
26116}
26117#[doc = "Multiply-add, addend first"]
26118#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"]
26119#[inline(always)]
26120#[target_feature(enable = "sve")]
26121#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26122#[cfg_attr(test, assert_instr(fmla))]
26123pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26124    svmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
26125}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmla))]
pub fn svmla_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Direct binding to the LLVM SVE fused multiply-add intrinsic for f64
    // elements (nxv2f64). Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")]
        fn _svmla_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // Predicate converted to the 2-lane form expected by the intrinsic.
    unsafe { _svmla_f64_m(pg.sve_into(), op1, op2, op3) }
}
26149#[doc = "Multiply-add, addend first"]
26150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"]
26151#[inline(always)]
26152#[target_feature(enable = "sve")]
26153#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26154#[cfg_attr(test, assert_instr(fmla))]
26155pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26156    svmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
26157}
26158#[doc = "Multiply-add, addend first"]
26159#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"]
26160#[inline(always)]
26161#[target_feature(enable = "sve")]
26162#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26163#[cfg_attr(test, assert_instr(fmla))]
26164pub fn svmla_f64_x(
26165    pg: svbool_t,
26166    op1: svfloat64_t,
26167    op2: svfloat64_t,
26168    op3: svfloat64_t,
26169) -> svfloat64_t {
26170    svmla_f64_m(pg, op1, op2, op3)
26171}
26172#[doc = "Multiply-add, addend first"]
26173#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"]
26174#[inline(always)]
26175#[target_feature(enable = "sve")]
26176#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26177#[cfg_attr(test, assert_instr(fmla))]
26178pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26179    svmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
26180}
26181#[doc = "Multiply-add, addend first"]
26182#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"]
26183#[inline(always)]
26184#[target_feature(enable = "sve")]
26185#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26186#[cfg_attr(test, assert_instr(fmla))]
26187pub fn svmla_f64_z(
26188    pg: svbool_t,
26189    op1: svfloat64_t,
26190    op2: svfloat64_t,
26191    op3: svfloat64_t,
26192) -> svfloat64_t {
26193    svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
26194}
26195#[doc = "Multiply-add, addend first"]
26196#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"]
26197#[inline(always)]
26198#[target_feature(enable = "sve")]
26199#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26200#[cfg_attr(test, assert_instr(fmla))]
26201pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26202    svmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
26203}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla))]
pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    // Direct binding to the LLVM SVE multiply-add intrinsic for i8 (nxv16i8).
    // Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")]
        fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    // 8-bit elements use the full-width predicate, so no conversion is needed.
    unsafe { _svmla_s8_m(pg, op1, op2, op3) }
}
26217#[doc = "Multiply-add, addend first"]
26218#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"]
26219#[inline(always)]
26220#[target_feature(enable = "sve")]
26221#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26222#[cfg_attr(test, assert_instr(mla))]
26223pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26224    svmla_s8_m(pg, op1, op2, svdup_n_s8(op3))
26225}
26226#[doc = "Multiply-add, addend first"]
26227#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"]
26228#[inline(always)]
26229#[target_feature(enable = "sve")]
26230#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26231#[cfg_attr(test, assert_instr(mla))]
26232pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
26233    svmla_s8_m(pg, op1, op2, op3)
26234}
26235#[doc = "Multiply-add, addend first"]
26236#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"]
26237#[inline(always)]
26238#[target_feature(enable = "sve")]
26239#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26240#[cfg_attr(test, assert_instr(mla))]
26241pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26242    svmla_s8_x(pg, op1, op2, svdup_n_s8(op3))
26243}
26244#[doc = "Multiply-add, addend first"]
26245#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"]
26246#[inline(always)]
26247#[target_feature(enable = "sve")]
26248#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26249#[cfg_attr(test, assert_instr(mla))]
26250pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
26251    svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
26252}
26253#[doc = "Multiply-add, addend first"]
26254#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"]
26255#[inline(always)]
26256#[target_feature(enable = "sve")]
26257#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26258#[cfg_attr(test, assert_instr(mla))]
26259pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26260    svmla_s8_z(pg, op1, op2, svdup_n_s8(op3))
26261}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla))]
pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    // Direct binding to the LLVM SVE multiply-add intrinsic for i16 (nxv8i16).
    // Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")]
        fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
            -> svint16_t;
    }
    // Predicate converted to the 8-lane form expected by the intrinsic.
    unsafe { _svmla_s16_m(pg.sve_into(), op1, op2, op3) }
}
26276#[doc = "Multiply-add, addend first"]
26277#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"]
26278#[inline(always)]
26279#[target_feature(enable = "sve")]
26280#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26281#[cfg_attr(test, assert_instr(mla))]
26282pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26283    svmla_s16_m(pg, op1, op2, svdup_n_s16(op3))
26284}
26285#[doc = "Multiply-add, addend first"]
26286#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"]
26287#[inline(always)]
26288#[target_feature(enable = "sve")]
26289#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26290#[cfg_attr(test, assert_instr(mla))]
26291pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
26292    svmla_s16_m(pg, op1, op2, op3)
26293}
26294#[doc = "Multiply-add, addend first"]
26295#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"]
26296#[inline(always)]
26297#[target_feature(enable = "sve")]
26298#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26299#[cfg_attr(test, assert_instr(mla))]
26300pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26301    svmla_s16_x(pg, op1, op2, svdup_n_s16(op3))
26302}
26303#[doc = "Multiply-add, addend first"]
26304#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"]
26305#[inline(always)]
26306#[target_feature(enable = "sve")]
26307#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26308#[cfg_attr(test, assert_instr(mla))]
26309pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
26310    svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
26311}
26312#[doc = "Multiply-add, addend first"]
26313#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"]
26314#[inline(always)]
26315#[target_feature(enable = "sve")]
26316#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26317#[cfg_attr(test, assert_instr(mla))]
26318pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26319    svmla_s16_z(pg, op1, op2, svdup_n_s16(op3))
26320}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla))]
pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    // Direct binding to the LLVM SVE multiply-add intrinsic for i32 (nxv4i32).
    // Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")]
        fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
            -> svint32_t;
    }
    // Predicate converted to the 4-lane form expected by the intrinsic.
    unsafe { _svmla_s32_m(pg.sve_into(), op1, op2, op3) }
}
26335#[doc = "Multiply-add, addend first"]
26336#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"]
26337#[inline(always)]
26338#[target_feature(enable = "sve")]
26339#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26340#[cfg_attr(test, assert_instr(mla))]
26341pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
26342    svmla_s32_m(pg, op1, op2, svdup_n_s32(op3))
26343}
26344#[doc = "Multiply-add, addend first"]
26345#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"]
26346#[inline(always)]
26347#[target_feature(enable = "sve")]
26348#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26349#[cfg_attr(test, assert_instr(mla))]
26350pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
26351    svmla_s32_m(pg, op1, op2, op3)
26352}
26353#[doc = "Multiply-add, addend first"]
26354#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"]
26355#[inline(always)]
26356#[target_feature(enable = "sve")]
26357#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26358#[cfg_attr(test, assert_instr(mla))]
26359pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
26360    svmla_s32_x(pg, op1, op2, svdup_n_s32(op3))
26361}
26362#[doc = "Multiply-add, addend first"]
26363#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"]
26364#[inline(always)]
26365#[target_feature(enable = "sve")]
26366#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26367#[cfg_attr(test, assert_instr(mla))]
26368pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
26369    svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
26370}
26371#[doc = "Multiply-add, addend first"]
26372#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"]
26373#[inline(always)]
26374#[target_feature(enable = "sve")]
26375#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26376#[cfg_attr(test, assert_instr(mla))]
26377pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
26378    svmla_s32_z(pg, op1, op2, svdup_n_s32(op3))
26379}
#[doc = "Multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mla))]
pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    // Direct binding to the LLVM SVE multiply-add intrinsic for i64 (nxv2i64).
    // Merging form: inactive lanes keep op1.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")]
        fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
            -> svint64_t;
    }
    // Predicate converted to the 2-lane form expected by the intrinsic.
    unsafe { _svmla_s64_m(pg.sve_into(), op1, op2, op3) }
}
26394#[doc = "Multiply-add, addend first"]
26395#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"]
26396#[inline(always)]
26397#[target_feature(enable = "sve")]
26398#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26399#[cfg_attr(test, assert_instr(mla))]
26400pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
26401    svmla_s64_m(pg, op1, op2, svdup_n_s64(op3))
26402}
26403#[doc = "Multiply-add, addend first"]
26404#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"]
26405#[inline(always)]
26406#[target_feature(enable = "sve")]
26407#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26408#[cfg_attr(test, assert_instr(mla))]
26409pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
26410    svmla_s64_m(pg, op1, op2, op3)
26411}
26412#[doc = "Multiply-add, addend first"]
26413#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"]
26414#[inline(always)]
26415#[target_feature(enable = "sve")]
26416#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26417#[cfg_attr(test, assert_instr(mla))]
26418pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
26419    svmla_s64_x(pg, op1, op2, svdup_n_s64(op3))
26420}
26421#[doc = "Multiply-add, addend first"]
26422#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"]
26423#[inline(always)]
26424#[target_feature(enable = "sve")]
26425#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26426#[cfg_attr(test, assert_instr(mla))]
26427pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
26428    svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
26429}
26430#[doc = "Multiply-add, addend first"]
26431#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"]
26432#[inline(always)]
26433#[target_feature(enable = "sve")]
26434#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26435#[cfg_attr(test, assert_instr(mla))]
26436pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
26437    svmla_s64_z(pg, op1, op2, svdup_n_s64(op3))
26438}
26439#[doc = "Multiply-add, addend first"]
26440#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"]
26441#[inline(always)]
26442#[target_feature(enable = "sve")]
26443#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26444#[cfg_attr(test, assert_instr(mla))]
26445pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
26446    unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
26447}
26448#[doc = "Multiply-add, addend first"]
26449#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"]
26450#[inline(always)]
26451#[target_feature(enable = "sve")]
26452#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26453#[cfg_attr(test, assert_instr(mla))]
26454pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
26455    svmla_u8_m(pg, op1, op2, svdup_n_u8(op3))
26456}
26457#[doc = "Multiply-add, addend first"]
26458#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"]
26459#[inline(always)]
26460#[target_feature(enable = "sve")]
26461#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26462#[cfg_attr(test, assert_instr(mla))]
26463pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
26464    svmla_u8_m(pg, op1, op2, op3)
26465}
26466#[doc = "Multiply-add, addend first"]
26467#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"]
26468#[inline(always)]
26469#[target_feature(enable = "sve")]
26470#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26471#[cfg_attr(test, assert_instr(mla))]
26472pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
26473    svmla_u8_x(pg, op1, op2, svdup_n_u8(op3))
26474}
26475#[doc = "Multiply-add, addend first"]
26476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"]
26477#[inline(always)]
26478#[target_feature(enable = "sve")]
26479#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26480#[cfg_attr(test, assert_instr(mla))]
26481pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
26482    svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
26483}
26484#[doc = "Multiply-add, addend first"]
26485#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"]
26486#[inline(always)]
26487#[target_feature(enable = "sve")]
26488#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26489#[cfg_attr(test, assert_instr(mla))]
26490pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
26491    svmla_u8_z(pg, op1, op2, svdup_n_u8(op3))
26492}
26493#[doc = "Multiply-add, addend first"]
26494#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"]
26495#[inline(always)]
26496#[target_feature(enable = "sve")]
26497#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26498#[cfg_attr(test, assert_instr(mla))]
26499pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
26500    unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
26501}
26502#[doc = "Multiply-add, addend first"]
26503#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"]
26504#[inline(always)]
26505#[target_feature(enable = "sve")]
26506#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26507#[cfg_attr(test, assert_instr(mla))]
26508pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
26509    svmla_u16_m(pg, op1, op2, svdup_n_u16(op3))
26510}
26511#[doc = "Multiply-add, addend first"]
26512#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"]
26513#[inline(always)]
26514#[target_feature(enable = "sve")]
26515#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26516#[cfg_attr(test, assert_instr(mla))]
26517pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
26518    svmla_u16_m(pg, op1, op2, op3)
26519}
26520#[doc = "Multiply-add, addend first"]
26521#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"]
26522#[inline(always)]
26523#[target_feature(enable = "sve")]
26524#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26525#[cfg_attr(test, assert_instr(mla))]
26526pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
26527    svmla_u16_x(pg, op1, op2, svdup_n_u16(op3))
26528}
26529#[doc = "Multiply-add, addend first"]
26530#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"]
26531#[inline(always)]
26532#[target_feature(enable = "sve")]
26533#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26534#[cfg_attr(test, assert_instr(mla))]
26535pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
26536    svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
26537}
26538#[doc = "Multiply-add, addend first"]
26539#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"]
26540#[inline(always)]
26541#[target_feature(enable = "sve")]
26542#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26543#[cfg_attr(test, assert_instr(mla))]
26544pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
26545    svmla_u16_z(pg, op1, op2, svdup_n_u16(op3))
26546}
26547#[doc = "Multiply-add, addend first"]
26548#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"]
26549#[inline(always)]
26550#[target_feature(enable = "sve")]
26551#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26552#[cfg_attr(test, assert_instr(mla))]
26553pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
26554    unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
26555}
26556#[doc = "Multiply-add, addend first"]
26557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"]
26558#[inline(always)]
26559#[target_feature(enable = "sve")]
26560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26561#[cfg_attr(test, assert_instr(mla))]
26562pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
26563    svmla_u32_m(pg, op1, op2, svdup_n_u32(op3))
26564}
26565#[doc = "Multiply-add, addend first"]
26566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"]
26567#[inline(always)]
26568#[target_feature(enable = "sve")]
26569#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26570#[cfg_attr(test, assert_instr(mla))]
26571pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
26572    svmla_u32_m(pg, op1, op2, op3)
26573}
26574#[doc = "Multiply-add, addend first"]
26575#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"]
26576#[inline(always)]
26577#[target_feature(enable = "sve")]
26578#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26579#[cfg_attr(test, assert_instr(mla))]
26580pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
26581    svmla_u32_x(pg, op1, op2, svdup_n_u32(op3))
26582}
26583#[doc = "Multiply-add, addend first"]
26584#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"]
26585#[inline(always)]
26586#[target_feature(enable = "sve")]
26587#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26588#[cfg_attr(test, assert_instr(mla))]
26589pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
26590    svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
26591}
26592#[doc = "Multiply-add, addend first"]
26593#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"]
26594#[inline(always)]
26595#[target_feature(enable = "sve")]
26596#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26597#[cfg_attr(test, assert_instr(mla))]
26598pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
26599    svmla_u32_z(pg, op1, op2, svdup_n_u32(op3))
26600}
26601#[doc = "Multiply-add, addend first"]
26602#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"]
26603#[inline(always)]
26604#[target_feature(enable = "sve")]
26605#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26606#[cfg_attr(test, assert_instr(mla))]
26607pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
26608    unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
26609}
26610#[doc = "Multiply-add, addend first"]
26611#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"]
26612#[inline(always)]
26613#[target_feature(enable = "sve")]
26614#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26615#[cfg_attr(test, assert_instr(mla))]
26616pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
26617    svmla_u64_m(pg, op1, op2, svdup_n_u64(op3))
26618}
26619#[doc = "Multiply-add, addend first"]
26620#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"]
26621#[inline(always)]
26622#[target_feature(enable = "sve")]
26623#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26624#[cfg_attr(test, assert_instr(mla))]
26625pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
26626    svmla_u64_m(pg, op1, op2, op3)
26627}
26628#[doc = "Multiply-add, addend first"]
26629#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"]
26630#[inline(always)]
26631#[target_feature(enable = "sve")]
26632#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26633#[cfg_attr(test, assert_instr(mla))]
26634pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
26635    svmla_u64_x(pg, op1, op2, svdup_n_u64(op3))
26636}
26637#[doc = "Multiply-add, addend first"]
26638#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"]
26639#[inline(always)]
26640#[target_feature(enable = "sve")]
26641#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26642#[cfg_attr(test, assert_instr(mla))]
26643pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
26644    svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
26645}
26646#[doc = "Multiply-add, addend first"]
26647#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"]
26648#[inline(always)]
26649#[target_feature(enable = "sve")]
26650#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26651#[cfg_attr(test, assert_instr(mla))]
26652pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
26653    svmla_u64_z(pg, op1, op2, svdup_n_u64(op3))
26654}
26655#[doc = "Multiply-add, addend first"]
26656#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"]
26657#[inline(always)]
26658#[target_feature(enable = "sve")]
26659#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26660#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
26661pub fn svmla_lane_f32<const IMM_INDEX: i32>(
26662    op1: svfloat32_t,
26663    op2: svfloat32_t,
26664    op3: svfloat32_t,
26665) -> svfloat32_t {
26666    static_assert_range!(IMM_INDEX, 0..=3);
26667    unsafe extern "unadjusted" {
26668        #[cfg_attr(
26669            target_arch = "aarch64",
26670            link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32"
26671        )]
26672        fn _svmla_lane_f32(
26673            op1: svfloat32_t,
26674            op2: svfloat32_t,
26675            op3: svfloat32_t,
26676            IMM_INDEX: i32,
26677        ) -> svfloat32_t;
26678    }
26679    unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) }
26680}
26681#[doc = "Multiply-add, addend first"]
26682#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"]
26683#[inline(always)]
26684#[target_feature(enable = "sve")]
26685#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26686#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
26687pub fn svmla_lane_f64<const IMM_INDEX: i32>(
26688    op1: svfloat64_t,
26689    op2: svfloat64_t,
26690    op3: svfloat64_t,
26691) -> svfloat64_t {
26692    static_assert_range!(IMM_INDEX, 0..=1);
26693    unsafe extern "unadjusted" {
26694        #[cfg_attr(
26695            target_arch = "aarch64",
26696            link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64"
26697        )]
26698        fn _svmla_lane_f64(
26699            op1: svfloat64_t,
26700            op2: svfloat64_t,
26701            op3: svfloat64_t,
26702            IMM_INDEX: i32,
26703        ) -> svfloat64_t;
26704    }
26705    unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) }
26706}
26707#[doc = "Multiply-subtract, minuend first"]
26708#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"]
26709#[inline(always)]
26710#[target_feature(enable = "sve")]
26711#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26712#[cfg_attr(test, assert_instr(fmls))]
26713pub fn svmls_f32_m(
26714    pg: svbool_t,
26715    op1: svfloat32_t,
26716    op2: svfloat32_t,
26717    op3: svfloat32_t,
26718) -> svfloat32_t {
26719    unsafe extern "unadjusted" {
26720        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")]
26721        fn _svmls_f32_m(
26722            pg: svbool4_t,
26723            op1: svfloat32_t,
26724            op2: svfloat32_t,
26725            op3: svfloat32_t,
26726        ) -> svfloat32_t;
26727    }
26728    unsafe { _svmls_f32_m(pg.sve_into(), op1, op2, op3) }
26729}
26730#[doc = "Multiply-subtract, minuend first"]
26731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"]
26732#[inline(always)]
26733#[target_feature(enable = "sve")]
26734#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26735#[cfg_attr(test, assert_instr(fmls))]
26736pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26737    svmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
26738}
26739#[doc = "Multiply-subtract, minuend first"]
26740#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"]
26741#[inline(always)]
26742#[target_feature(enable = "sve")]
26743#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26744#[cfg_attr(test, assert_instr(fmls))]
26745pub fn svmls_f32_x(
26746    pg: svbool_t,
26747    op1: svfloat32_t,
26748    op2: svfloat32_t,
26749    op3: svfloat32_t,
26750) -> svfloat32_t {
26751    svmls_f32_m(pg, op1, op2, op3)
26752}
26753#[doc = "Multiply-subtract, minuend first"]
26754#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"]
26755#[inline(always)]
26756#[target_feature(enable = "sve")]
26757#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26758#[cfg_attr(test, assert_instr(fmls))]
26759pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26760    svmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
26761}
26762#[doc = "Multiply-subtract, minuend first"]
26763#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"]
26764#[inline(always)]
26765#[target_feature(enable = "sve")]
26766#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26767#[cfg_attr(test, assert_instr(fmls))]
26768pub fn svmls_f32_z(
26769    pg: svbool_t,
26770    op1: svfloat32_t,
26771    op2: svfloat32_t,
26772    op3: svfloat32_t,
26773) -> svfloat32_t {
26774    svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
26775}
26776#[doc = "Multiply-subtract, minuend first"]
26777#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"]
26778#[inline(always)]
26779#[target_feature(enable = "sve")]
26780#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26781#[cfg_attr(test, assert_instr(fmls))]
26782pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
26783    svmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
26784}
26785#[doc = "Multiply-subtract, minuend first"]
26786#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"]
26787#[inline(always)]
26788#[target_feature(enable = "sve")]
26789#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26790#[cfg_attr(test, assert_instr(fmls))]
26791pub fn svmls_f64_m(
26792    pg: svbool_t,
26793    op1: svfloat64_t,
26794    op2: svfloat64_t,
26795    op3: svfloat64_t,
26796) -> svfloat64_t {
26797    unsafe extern "unadjusted" {
26798        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")]
26799        fn _svmls_f64_m(
26800            pg: svbool2_t,
26801            op1: svfloat64_t,
26802            op2: svfloat64_t,
26803            op3: svfloat64_t,
26804        ) -> svfloat64_t;
26805    }
26806    unsafe { _svmls_f64_m(pg.sve_into(), op1, op2, op3) }
26807}
26808#[doc = "Multiply-subtract, minuend first"]
26809#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"]
26810#[inline(always)]
26811#[target_feature(enable = "sve")]
26812#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26813#[cfg_attr(test, assert_instr(fmls))]
26814pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26815    svmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
26816}
26817#[doc = "Multiply-subtract, minuend first"]
26818#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"]
26819#[inline(always)]
26820#[target_feature(enable = "sve")]
26821#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26822#[cfg_attr(test, assert_instr(fmls))]
26823pub fn svmls_f64_x(
26824    pg: svbool_t,
26825    op1: svfloat64_t,
26826    op2: svfloat64_t,
26827    op3: svfloat64_t,
26828) -> svfloat64_t {
26829    svmls_f64_m(pg, op1, op2, op3)
26830}
26831#[doc = "Multiply-subtract, minuend first"]
26832#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"]
26833#[inline(always)]
26834#[target_feature(enable = "sve")]
26835#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26836#[cfg_attr(test, assert_instr(fmls))]
26837pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26838    svmls_f64_x(pg, op1, op2, svdup_n_f64(op3))
26839}
26840#[doc = "Multiply-subtract, minuend first"]
26841#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"]
26842#[inline(always)]
26843#[target_feature(enable = "sve")]
26844#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26845#[cfg_attr(test, assert_instr(fmls))]
26846pub fn svmls_f64_z(
26847    pg: svbool_t,
26848    op1: svfloat64_t,
26849    op2: svfloat64_t,
26850    op3: svfloat64_t,
26851) -> svfloat64_t {
26852    svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
26853}
26854#[doc = "Multiply-subtract, minuend first"]
26855#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"]
26856#[inline(always)]
26857#[target_feature(enable = "sve")]
26858#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26859#[cfg_attr(test, assert_instr(fmls))]
26860pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
26861    svmls_f64_z(pg, op1, op2, svdup_n_f64(op3))
26862}
26863#[doc = "Multiply-subtract, minuend first"]
26864#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"]
26865#[inline(always)]
26866#[target_feature(enable = "sve")]
26867#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26868#[cfg_attr(test, assert_instr(mls))]
26869pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
26870    unsafe extern "unadjusted" {
26871        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")]
26872        fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
26873    }
26874    unsafe { _svmls_s8_m(pg, op1, op2, op3) }
26875}
26876#[doc = "Multiply-subtract, minuend first"]
26877#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"]
26878#[inline(always)]
26879#[target_feature(enable = "sve")]
26880#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26881#[cfg_attr(test, assert_instr(mls))]
26882pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26883    svmls_s8_m(pg, op1, op2, svdup_n_s8(op3))
26884}
26885#[doc = "Multiply-subtract, minuend first"]
26886#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"]
26887#[inline(always)]
26888#[target_feature(enable = "sve")]
26889#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26890#[cfg_attr(test, assert_instr(mls))]
26891pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
26892    svmls_s8_m(pg, op1, op2, op3)
26893}
26894#[doc = "Multiply-subtract, minuend first"]
26895#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"]
26896#[inline(always)]
26897#[target_feature(enable = "sve")]
26898#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26899#[cfg_attr(test, assert_instr(mls))]
26900pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26901    svmls_s8_x(pg, op1, op2, svdup_n_s8(op3))
26902}
26903#[doc = "Multiply-subtract, minuend first"]
26904#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"]
26905#[inline(always)]
26906#[target_feature(enable = "sve")]
26907#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26908#[cfg_attr(test, assert_instr(mls))]
26909pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
26910    svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
26911}
26912#[doc = "Multiply-subtract, minuend first"]
26913#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"]
26914#[inline(always)]
26915#[target_feature(enable = "sve")]
26916#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26917#[cfg_attr(test, assert_instr(mls))]
26918pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
26919    svmls_s8_z(pg, op1, op2, svdup_n_s8(op3))
26920}
26921#[doc = "Multiply-subtract, minuend first"]
26922#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"]
26923#[inline(always)]
26924#[target_feature(enable = "sve")]
26925#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26926#[cfg_attr(test, assert_instr(mls))]
26927pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
26928    unsafe extern "unadjusted" {
26929        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")]
26930        fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
26931            -> svint16_t;
26932    }
26933    unsafe { _svmls_s16_m(pg.sve_into(), op1, op2, op3) }
26934}
26935#[doc = "Multiply-subtract, minuend first"]
26936#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"]
26937#[inline(always)]
26938#[target_feature(enable = "sve")]
26939#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26940#[cfg_attr(test, assert_instr(mls))]
26941pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26942    svmls_s16_m(pg, op1, op2, svdup_n_s16(op3))
26943}
26944#[doc = "Multiply-subtract, minuend first"]
26945#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"]
26946#[inline(always)]
26947#[target_feature(enable = "sve")]
26948#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26949#[cfg_attr(test, assert_instr(mls))]
26950pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
26951    svmls_s16_m(pg, op1, op2, op3)
26952}
26953#[doc = "Multiply-subtract, minuend first"]
26954#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"]
26955#[inline(always)]
26956#[target_feature(enable = "sve")]
26957#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26958#[cfg_attr(test, assert_instr(mls))]
26959pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26960    svmls_s16_x(pg, op1, op2, svdup_n_s16(op3))
26961}
26962#[doc = "Multiply-subtract, minuend first"]
26963#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"]
26964#[inline(always)]
26965#[target_feature(enable = "sve")]
26966#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26967#[cfg_attr(test, assert_instr(mls))]
26968pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
26969    svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
26970}
26971#[doc = "Multiply-subtract, minuend first"]
26972#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"]
26973#[inline(always)]
26974#[target_feature(enable = "sve")]
26975#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26976#[cfg_attr(test, assert_instr(mls))]
26977pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
26978    svmls_s16_z(pg, op1, op2, svdup_n_s16(op3))
26979}
26980#[doc = "Multiply-subtract, minuend first"]
26981#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"]
26982#[inline(always)]
26983#[target_feature(enable = "sve")]
26984#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26985#[cfg_attr(test, assert_instr(mls))]
26986pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
26987    unsafe extern "unadjusted" {
26988        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")]
26989        fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
26990            -> svint32_t;
26991    }
26992    unsafe { _svmls_s32_m(pg.sve_into(), op1, op2, op3) }
26993}
26994#[doc = "Multiply-subtract, minuend first"]
26995#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"]
26996#[inline(always)]
26997#[target_feature(enable = "sve")]
26998#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
26999#[cfg_attr(test, assert_instr(mls))]
27000pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
27001    svmls_s32_m(pg, op1, op2, svdup_n_s32(op3))
27002}
27003#[doc = "Multiply-subtract, minuend first"]
27004#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"]
27005#[inline(always)]
27006#[target_feature(enable = "sve")]
27007#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27008#[cfg_attr(test, assert_instr(mls))]
27009pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
27010    svmls_s32_m(pg, op1, op2, op3)
27011}
27012#[doc = "Multiply-subtract, minuend first"]
27013#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"]
27014#[inline(always)]
27015#[target_feature(enable = "sve")]
27016#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27017#[cfg_attr(test, assert_instr(mls))]
27018pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
27019    svmls_s32_x(pg, op1, op2, svdup_n_s32(op3))
27020}
27021#[doc = "Multiply-subtract, minuend first"]
27022#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"]
27023#[inline(always)]
27024#[target_feature(enable = "sve")]
27025#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27026#[cfg_attr(test, assert_instr(mls))]
27027pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
27028    svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
27029}
27030#[doc = "Multiply-subtract, minuend first"]
27031#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"]
27032#[inline(always)]
27033#[target_feature(enable = "sve")]
27034#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27035#[cfg_attr(test, assert_instr(mls))]
27036pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
27037    svmls_s32_z(pg, op1, op2, svdup_n_s32(op3))
27038}
27039#[doc = "Multiply-subtract, minuend first"]
27040#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"]
27041#[inline(always)]
27042#[target_feature(enable = "sve")]
27043#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27044#[cfg_attr(test, assert_instr(mls))]
27045pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
27046    unsafe extern "unadjusted" {
27047        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")]
27048        fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
27049            -> svint64_t;
27050    }
27051    unsafe { _svmls_s64_m(pg.sve_into(), op1, op2, op3) }
27052}
27053#[doc = "Multiply-subtract, minuend first"]
27054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"]
27055#[inline(always)]
27056#[target_feature(enable = "sve")]
27057#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27058#[cfg_attr(test, assert_instr(mls))]
27059pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
27060    svmls_s64_m(pg, op1, op2, svdup_n_s64(op3))
27061}
27062#[doc = "Multiply-subtract, minuend first"]
27063#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"]
27064#[inline(always)]
27065#[target_feature(enable = "sve")]
27066#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27067#[cfg_attr(test, assert_instr(mls))]
27068pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
27069    svmls_s64_m(pg, op1, op2, op3)
27070}
27071#[doc = "Multiply-subtract, minuend first"]
27072#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"]
27073#[inline(always)]
27074#[target_feature(enable = "sve")]
27075#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27076#[cfg_attr(test, assert_instr(mls))]
27077pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
27078    svmls_s64_x(pg, op1, op2, svdup_n_s64(op3))
27079}
27080#[doc = "Multiply-subtract, minuend first"]
27081#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"]
27082#[inline(always)]
27083#[target_feature(enable = "sve")]
27084#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27085#[cfg_attr(test, assert_instr(mls))]
27086pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
27087    svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
27088}
27089#[doc = "Multiply-subtract, minuend first"]
27090#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"]
27091#[inline(always)]
27092#[target_feature(enable = "sve")]
27093#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
27094#[cfg_attr(test, assert_instr(mls))]
27095pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
27096    svmls_s64_z(pg, op1, op2, svdup_n_s64(op3))
27097}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// Merging (`_m`) form: reinterprets the unsigned vectors as signed and reuses
// the signed wrapper; MLS is the same operation on either interpretation.
pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// Scalar operand: broadcast `op3`, then delegate to the vector `_m` form.
pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmls_u8_m(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// `_x` form: implemented by delegating to the merging (`_m`) form, which is a
// valid choice for the inactive-lane-value-unspecified variant.
pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    svmls_u8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmls_u8_x(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// `_z` (zeroing) form: inactive lanes of `op1` are zeroed with `svsel` before
// the merging call, so inactive result lanes come out as zero.
pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmls_u8_z(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// u16 family mirrors the u8 family: `_m` reinterprets to the signed wrapper,
// `_n_*` broadcasts the scalar, `_x` delegates to `_m`, `_z` pre-zeroes `op1`.
pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmls_u16_m(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    svmls_u16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmls_u16_x(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmls_u16_z(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// u32 family: same generated pattern as the u8/u16 families above.
pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmls_u32_m(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    svmls_u32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmls_u32_x(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmls_u32_z(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
// u64 family: same generated pattern as the narrower unsigned families above.
pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmls_u64_m(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    svmls_u64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmls_u64_x(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mls))]
pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmls_u64_z(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
// Indexed form: `IMM_INDEX` selects a lane of `op3` and is validated at
// compile time (0..=3 for four f32 lanes per 128-bit segment) before being
// forwarded to the LLVM intrinsic as a plain i32.
pub fn svmls_lane_f32<const IMM_INDEX: i32>(
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32"
        )]
        fn _svmls_lane_f32(
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
            IMM_INDEX: i32,
        ) -> svfloat32_t;
    }
    unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
// f64 counterpart of `svmls_lane_f32`; only two f64 lanes fit per 128-bit
// segment, hence the tighter 0..=1 immediate range.
pub fn svmls_lane_f64<const IMM_INDEX: i32>(
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    static_assert_range!(IMM_INDEX, 0..=1);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64"
        )]
        fn _svmls_lane_f64(
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
            IMM_INDEX: i32,
        ) -> svfloat64_t;
    }
    unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f32mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmmla))]
// Unpredicated FMMLA; requires the FEAT_F32MM extension (hence "f32mm" in the
// target-feature list). Thin wrapper over the LLVM intrinsic.
pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")]
        fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svmmla_f32(op1, op2, op3) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmmla))]
// f64 FMMLA; gated on the FEAT_F64MM extension ("f64mm" target feature).
pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")]
        fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svmmla_f64(op1, op2, op3) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smmla))]
// SMMLA: widening i8 x i8 -> i32 matrix multiply-accumulate, gated on
// FEAT_I8MM ("i8mm"). Note the mixed element widths in the signature.
pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")]
        fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
    }
    unsafe { _svmmla_s32(op1, op2, op3) }
}
#[doc = "Matrix multiply-accumulate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ummla))]
// UMMLA: unsigned counterpart of `svmmla_s32`. The extern declaration uses the
// signed vector types, so the arguments are reinterpreted on the way in and
// the result reinterpreted back on the way out.
pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ummla.nxv4i32")]
        fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
    }
    unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Move"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mov))]
// Predicated predicate move, expressed as `op AND op` under `pg`: active
// lanes copy `op`, inactive lanes are zeroed (MOV is an alias of AND here).
pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    svand_b_z(pg, op, op)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
// Merging FMSB: predicate is converted from the generic `svbool_t` to the
// 4-lane predicate type expected by the nxv4f32 LLVM intrinsic via `sve_into`.
pub fn svmsb_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")]
        fn _svmsb_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    unsafe { _svmsb_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
// Scalar operand: broadcast `op3`, then delegate to the vector `_m` form.
pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
// `_x` form: implemented by delegating to the merging (`_m`) form.
pub fn svmsb_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svmsb_f32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
// `_z` (zeroing) form: inactive lanes of `op1` are zeroed with `svsel`
// before the merging call.
pub fn svmsb_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
// f64 family mirrors the f32 family above; the nxv2f64 intrinsic takes the
// 2-lane predicate type, produced from `svbool_t` via `sve_into`.
pub fn svmsb_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")]
        fn _svmsb_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    unsafe { _svmsb_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svmsb_f64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmsb))]
pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Merging MSB for s8. The nxv16i8 intrinsic takes `svbool_t` directly, so the
// predicate is passed through unchanged (no `sve_into` conversion, unlike the
// wider-element forms below).
pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")]
        fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
    }
    unsafe { _svmsb_s8_m(pg, op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    svmsb_s8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
    svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
    svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// s16 family: predicate converted to the 8-lane type via `sve_into` for the
// nxv8i16 intrinsic; `_n_*`/`_x`/`_z` follow the standard generated pattern.
pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")]
        fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
            -> svint16_t;
    }
    unsafe { _svmsb_s16_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    svmsb_s16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
    svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
    svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// s32 family: predicate converted to the 4-lane type via `sve_into` for the
// nxv4i32 intrinsic; remaining variants follow the standard generated pattern.
pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")]
        fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
            -> svint32_t;
    }
    unsafe { _svmsb_s32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    svmsb_s32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
    svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
    svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Merging (_m) form: thin wrapper over the LLVM `msb` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")]
        fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
            -> svint64_t;
    }
    // `sve_into` converts the byte-granular `svbool_t` into the 2-lane `svbool2_t`
    // predicate layout that the `nxv2i64` intrinsic expects.
    unsafe { _svmsb_s64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form: splats `op3` across all lanes, then reuses the vector form.
pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    svmsb_s64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _x: splats `op3` and delegates.
pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
    svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _z: splats `op3` and delegates.
pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
    svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Unsigned variant: bit-reinterprets through the signed implementation and back.
// Wrapping integer multiply-subtract produces identical bits for signed and
// unsigned operands, so this is a pure type-level conversion.
pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form: splats `op3` across all lanes, then reuses the vector form.
pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    svmsb_u8_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _x: splats `op3` and delegates.
pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
    svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _z: splats `op3` and delegates.
pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
    svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Unsigned variant: bit-reinterprets through the signed implementation and back.
// Wrapping integer multiply-subtract produces identical bits for signed and
// unsigned operands, so this is a pure type-level conversion.
pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form: splats `op3` across all lanes, then reuses the vector form.
pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    svmsb_u16_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _x: splats `op3` and delegates.
pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
    svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _z: splats `op3` and delegates.
pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
    svmsb_u16_z(pg, op1, op2, svdup_n_u16(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Unsigned variant: bit-reinterprets through the signed implementation and back.
// Wrapping integer multiply-subtract produces identical bits for signed and
// unsigned operands, so this is a pure type-level conversion.
pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form: splats `op3` across all lanes, then reuses the vector form.
pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    svmsb_u32_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _x: splats `op3` and delegates.
pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
    svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _z: splats `op3` and delegates.
pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
    svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Unsigned variant: bit-reinterprets through the signed implementation and back.
// Wrapping integer multiply-subtract produces identical bits for signed and
// unsigned operands, so this is a pure type-level conversion.
pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form: splats `op3` across all lanes, then reuses the vector form.
pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    svmsb_u64_m(pg, op1, op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _x: splats `op3` and delegates.
pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
    svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
}
#[doc = "Multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(msb))]
// Scalar (_n) form of _z: splats `op3` and delegates.
pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
    svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Merging (_m) form: thin wrapper over the LLVM `fmul` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")]
        fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts the byte-granular `svbool_t` into the 4-lane `svbool4_t`
    // predicate layout that the `nxv4f32` intrinsic expects.
    unsafe { _svmul_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form: splats `op2` across all lanes, then reuses the vector form.
pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmul_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svmul_f32_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form of _x: splats `op2` and delegates.
pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmul_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0.0 first, so the
// subsequent merging operation leaves those lanes as 0.0 in the result.
pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form of _z: splats `op2` and delegates.
pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svmul_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Merging (_m) form: thin wrapper over the LLVM `fmul` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")]
        fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // `sve_into` converts the byte-granular `svbool_t` into the 2-lane `svbool2_t`
    // predicate layout that the `nxv2f64` intrinsic expects.
    unsafe { _svmul_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form: splats `op2` across all lanes, then reuses the vector form.
pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmul_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svmul_f64_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form of _x: splats `op2` and delegates.
pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmul_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0.0 first, so the
// subsequent merging operation leaves those lanes as 0.0 in the result.
pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmul))]
// Scalar (_n) form of _z: splats `op2` and delegates.
pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svmul_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Merging (_m) form: thin wrapper over the LLVM `mul` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")]
        fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // For 8-bit elements (`nxv16i8`) the predicate granularity already matches the
    // byte-granular `svbool_t`, so `pg` is passed through without conversion.
    unsafe { _svmul_s8_m(pg, op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form: splats `op2` across all lanes, then reuses the vector form.
pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmul_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmul_s8_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _x: splats `op2` and delegates.
pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmul_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _z: splats `op2` and delegates.
pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svmul_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Merging (_m) form: thin wrapper over the LLVM `mul` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")]
        fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // `sve_into` converts the byte-granular `svbool_t` into the 8-lane `svbool8_t`
    // predicate layout that the `nxv8i16` intrinsic expects.
    unsafe { _svmul_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form: splats `op2` across all lanes, then reuses the vector form.
pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmul_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmul_s16_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _x: splats `op2` and delegates.
pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmul_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _z: splats `op2` and delegates.
pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svmul_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Merging (_m) form: thin wrapper over the LLVM `mul` intrinsic. Per the Arm docs
// linked above, inactive lanes (where `pg` is false) keep the value of `op1`.
pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")]
        fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // `sve_into` converts the byte-granular `svbool_t` into the 4-lane `svbool4_t`
    // predicate layout that the `nxv4i32` intrinsic expects.
    unsafe { _svmul_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form: splats `op2` across all lanes, then reuses the vector form.
pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmul_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// "Don't care" (_x) form: inactive-lane results are unspecified (see linked docs),
// so delegating to the merging form is a valid implementation choice.
pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmul_s32_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _x: splats `op2` and delegates.
pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmul_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Zeroing (_z) form: `svsel` clears inactive lanes of `op1` to 0 first, so the
// subsequent merging operation leaves those lanes as 0 in the result.
pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
// Scalar (_n) form of _z: splats `op2` and delegates.
pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svmul_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Merging form: lowers directly to the LLVM SVE intrinsic; inactive-lane
    // behavior is as documented at the Arm doc link above.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")]
        fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature; `sve_into`
    // reinterprets the predicate to the 2-lane form expected for 64-bit elements.
    unsafe { _svmul_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmul_s64_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Low-half multiplication is bit-identical for signed and unsigned
    // two's-complement values, so reinterpret to the signed intrinsic and back.
    unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmul_u8_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Low-half multiplication is bit-identical for signed and unsigned
    // two's-complement values, so reinterpret to the signed intrinsic and back.
    unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmul_u16_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Low-half multiplication is bit-identical for signed and unsigned
    // two's-complement values, so reinterpret to the signed intrinsic and back.
    unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmul_u32_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Low-half multiplication is bit-identical for signed and unsigned
    // two's-complement values, so reinterpret to the signed intrinsic and back.
    unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmul_u64_m(pg, op1, op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Multiply"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(mul))]
pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmul_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Merging form: lowers directly to the LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")]
        fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature. Byte
    // elements use the full 16-lane predicate, so `pg` passes through unchanged.
    unsafe { _svmulh_s8_m(pg, op1, op2) }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_s8_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Merging form: lowers directly to the LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")]
        fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature; `sve_into`
    // reinterprets the predicate to the 8-lane form expected for 16-bit elements.
    unsafe { _svmulh_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_s16_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Merging form: lowers directly to the LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")]
        fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature; `sve_into`
    // reinterprets the predicate to the 4-lane form expected for 32-bit elements.
    unsafe { _svmulh_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_s32_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Merging form: lowers directly to the LLVM SVE intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")]
        fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature; `sve_into`
    // reinterprets the predicate to the 2-lane form expected for 64-bit elements.
    unsafe { _svmulh_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_s64_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(smulh))]
pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Merging form: lowers directly to the LLVM SVE `umulh` intrinsic, whose
    // binding is declared with signed vector types; the operands and result are
    // only bit-reinterpreted, not converted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv16i8")]
        fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature. Byte
    // elements use the full 16-lane predicate, so `pg` passes through unchanged.
    unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_u8_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Merging form: lowers directly to the LLVM SVE `umulh` intrinsic, whose
    // binding is declared with signed vector types; the operands and result are
    // only bit-reinterpreted, not converted.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")]
        fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the declaration matches the LLVM intrinsic's signature; `sve_into`
    // reinterprets the predicate to the 8-lane form expected for 16-bit elements.
    unsafe { _svmulh_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" form: inactive lanes may hold any value, so reusing the
    // merging implementation is a valid (and simple) choice.
    svmulh_u16_m(pg, op1, op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing form: force inactive lanes of `op1` to 0 first so the merging
    // multiply leaves zeros in the inactive lanes of the result.
    svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar form: splat `op2` across all lanes and defer to the vector variant.
    svmulh_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Merging (`_m`) form, lowered to the LLVM SVE `umulh` intrinsic. The
    // intrinsic is declared over signed element types, so operands are
    // bit-reinterpreted on the way in and back on the way out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")]
        fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `as_signed`/`as_unsigned`
    // are bit-preserving reinterpretations and `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svmulh_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
28959#[doc = "Multiply, returning high-half"]
28960#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"]
28961#[inline(always)]
28962#[target_feature(enable = "sve")]
28963#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
28964#[cfg_attr(test, assert_instr(umulh))]
28965pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
28966    svmulh_u32_m(pg, op1, svdup_n_u32(op2))
28967}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svmulh_u32_m(pg, op1, op2)
}
28977#[doc = "Multiply, returning high-half"]
28978#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"]
28979#[inline(always)]
28980#[target_feature(enable = "sve")]
28981#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
28982#[cfg_attr(test, assert_instr(umulh))]
28983pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
28984    svmulh_u32_x(pg, op1, svdup_n_u32(op2))
28985}
28986#[doc = "Multiply, returning high-half"]
28987#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"]
28988#[inline(always)]
28989#[target_feature(enable = "sve")]
28990#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
28991#[cfg_attr(test, assert_instr(umulh))]
28992pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
28993    svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
28994}
28995#[doc = "Multiply, returning high-half"]
28996#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"]
28997#[inline(always)]
28998#[target_feature(enable = "sve")]
28999#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29000#[cfg_attr(test, assert_instr(umulh))]
29001pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
29002    svmulh_u32_z(pg, op1, svdup_n_u32(op2))
29003}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Merging (`_m`) form, lowered to the LLVM SVE `umulh` intrinsic. The
    // intrinsic is declared over signed element types, so operands are
    // bit-reinterpreted on the way in and back on the way out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")]
        fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `as_signed`/`as_unsigned`
    // are bit-preserving reinterpretations and `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svmulh_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
}
29017#[doc = "Multiply, returning high-half"]
29018#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"]
29019#[inline(always)]
29020#[target_feature(enable = "sve")]
29021#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29022#[cfg_attr(test, assert_instr(umulh))]
29023pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
29024    svmulh_u64_m(pg, op1, svdup_n_u64(op2))
29025}
#[doc = "Multiply, returning high-half"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(umulh))]
pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svmulh_u64_m(pg, op1, op2)
}
29035#[doc = "Multiply, returning high-half"]
29036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"]
29037#[inline(always)]
29038#[target_feature(enable = "sve")]
29039#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29040#[cfg_attr(test, assert_instr(umulh))]
29041pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
29042    svmulh_u64_x(pg, op1, svdup_n_u64(op2))
29043}
29044#[doc = "Multiply, returning high-half"]
29045#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"]
29046#[inline(always)]
29047#[target_feature(enable = "sve")]
29048#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29049#[cfg_attr(test, assert_instr(umulh))]
29050pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
29051    svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
29052}
29053#[doc = "Multiply, returning high-half"]
29054#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"]
29055#[inline(always)]
29056#[target_feature(enable = "sve")]
29057#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29058#[cfg_attr(test, assert_instr(umulh))]
29059pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
29060    svmulh_u64_z(pg, op1, svdup_n_u64(op2))
29061}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Merging (`_m`) form, lowered directly to the LLVM SVE `fmulx` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")]
        fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svmulx_f32_m(pg.sve_into(), op1, op2) }
}
29075#[doc = "Multiply extended (∞×0=2)"]
29076#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"]
29077#[inline(always)]
29078#[target_feature(enable = "sve")]
29079#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29080#[cfg_attr(test, assert_instr(fmulx))]
29081pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
29082    svmulx_f32_m(pg, op1, svdup_n_f32(op2))
29083}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svmulx_f32_m(pg, op1, op2)
}
29093#[doc = "Multiply extended (∞×0=2)"]
29094#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"]
29095#[inline(always)]
29096#[target_feature(enable = "sve")]
29097#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29098#[cfg_attr(test, assert_instr(fmulx))]
29099pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
29100    svmulx_f32_x(pg, op1, svdup_n_f32(op2))
29101}
29102#[doc = "Multiply extended (∞×0=2)"]
29103#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"]
29104#[inline(always)]
29105#[target_feature(enable = "sve")]
29106#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29107#[cfg_attr(test, assert_instr(fmulx))]
29108pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
29109    svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
29110}
29111#[doc = "Multiply extended (∞×0=2)"]
29112#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"]
29113#[inline(always)]
29114#[target_feature(enable = "sve")]
29115#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29116#[cfg_attr(test, assert_instr(fmulx))]
29117pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
29118    svmulx_f32_z(pg, op1, svdup_n_f32(op2))
29119}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Merging (`_m`) form, lowered directly to the LLVM SVE `fmulx` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")]
        fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svmulx_f64_m(pg.sve_into(), op1, op2) }
}
29133#[doc = "Multiply extended (∞×0=2)"]
29134#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"]
29135#[inline(always)]
29136#[target_feature(enable = "sve")]
29137#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29138#[cfg_attr(test, assert_instr(fmulx))]
29139pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
29140    svmulx_f64_m(pg, op1, svdup_n_f64(op2))
29141}
#[doc = "Multiply extended (∞×0=2)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fmulx))]
pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svmulx_f64_m(pg, op1, op2)
}
29151#[doc = "Multiply extended (∞×0=2)"]
29152#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"]
29153#[inline(always)]
29154#[target_feature(enable = "sve")]
29155#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29156#[cfg_attr(test, assert_instr(fmulx))]
29157pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
29158    svmulx_f64_x(pg, op1, svdup_n_f64(op2))
29159}
29160#[doc = "Multiply extended (∞×0=2)"]
29161#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"]
29162#[inline(always)]
29163#[target_feature(enable = "sve")]
29164#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29165#[cfg_attr(test, assert_instr(fmulx))]
29166pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
29167    svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
29168}
29169#[doc = "Multiply extended (∞×0=2)"]
29170#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"]
29171#[inline(always)]
29172#[target_feature(enable = "sve")]
29173#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29174#[cfg_attr(test, assert_instr(fmulx))]
29175pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
29176    svmulx_f64_z(pg, op1, svdup_n_f64(op2))
29177}
#[doc = "Bitwise NAND"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nand))]
pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // Predicate-on-predicate operation: the intrinsic takes the generic
    // `svbool_t` directly, so no predicate conversion is needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")]
        fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic with an identical signature.
    unsafe { _svnand_b_z(pg, op1, op2) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (`_m`) form: `inactive` supplies the values merged into lanes
    // not selected by `pg`. Lowered to the LLVM SVE `fneg` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")]
        fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svneg_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_f32_m(op, pg, op)
}
29213#[doc = "Negate"]
29214#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"]
29215#[inline(always)]
29216#[target_feature(enable = "sve")]
29217#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29218#[cfg_attr(test, assert_instr(fneg))]
29219pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
29220    svneg_f32_m(svdup_n_f32(0.0), pg, op)
29221}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (`_m`) form: `inactive` supplies the values merged into lanes
    // not selected by `pg`. Lowered to the LLVM SVE `fneg` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")]
        fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svneg_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fneg))]
pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_f64_m(op, pg, op)
}
29244#[doc = "Negate"]
29245#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"]
29246#[inline(always)]
29247#[target_feature(enable = "sve")]
29248#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29249#[cfg_attr(test, assert_instr(fneg))]
29250pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
29251    svneg_f64_m(svdup_n_f64(0.0), pg, op)
29252}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    // Merging (`_m`) form for 8-bit elements. The intrinsic takes the generic
    // `svbool_t` directly, so no predicate conversion is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")]
        fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic with an identical signature.
    unsafe { _svneg_s8_m(inactive, pg, op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_s8_m(op, pg, op)
}
29275#[doc = "Negate"]
29276#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"]
29277#[inline(always)]
29278#[target_feature(enable = "sve")]
29279#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29280#[cfg_attr(test, assert_instr(neg))]
29281pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
29282    svneg_s8_m(svdup_n_s8(0), pg, op)
29283}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    // Merging (`_m`) form: `inactive` supplies the values merged into lanes
    // not selected by `pg`. Lowered to the LLVM SVE `neg` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")]
        fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svneg_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_s16_m(op, pg, op)
}
29306#[doc = "Negate"]
29307#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"]
29308#[inline(always)]
29309#[target_feature(enable = "sve")]
29310#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29311#[cfg_attr(test, assert_instr(neg))]
29312pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
29313    svneg_s16_m(svdup_n_s16(0), pg, op)
29314}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    // Merging (`_m`) form: `inactive` supplies the values merged into lanes
    // not selected by `pg`. Lowered to the LLVM SVE `neg` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")]
        fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svneg_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_s32_m(op, pg, op)
}
29337#[doc = "Negate"]
29338#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"]
29339#[inline(always)]
29340#[target_feature(enable = "sve")]
29341#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29342#[cfg_attr(test, assert_instr(neg))]
29343pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
29344    svneg_s32_m(svdup_n_s32(0), pg, op)
29345}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    // Merging (`_m`) form: `inactive` supplies the values merged into lanes
    // not selected by `pg`. Lowered to the LLVM SVE `neg` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")]
        fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svneg_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Negate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(neg))]
pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "Don't care" (`_x`) form: reuses the merging form with `op` itself as
    // the inactive-lane source, which is an acceptable choice under the ACLE
    // `_x` convention.
    svneg_s64_m(op, pg, op)
}
29368#[doc = "Negate"]
29369#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"]
29370#[inline(always)]
29371#[target_feature(enable = "sve")]
29372#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29373#[cfg_attr(test, assert_instr(neg))]
29374pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
29375    svneg_s64_m(svdup_n_s64(0), pg, op)
29376}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Merging (`_m`) form, lowered directly to the LLVM SVE `fnmad` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")]
        fn _svnmad_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svnmad_f32_m(pg.sve_into(), op1, op2, op3) }
}
29400#[doc = "Negated multiply-add, multiplicand first"]
29401#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"]
29402#[inline(always)]
29403#[target_feature(enable = "sve")]
29404#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29405#[cfg_attr(test, assert_instr(fnmad))]
29406pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29407    svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3))
29408}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svnmad_f32_m(pg, op1, op2, op3)
}
29423#[doc = "Negated multiply-add, multiplicand first"]
29424#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"]
29425#[inline(always)]
29426#[target_feature(enable = "sve")]
29427#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29428#[cfg_attr(test, assert_instr(fnmad))]
29429pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29430    svnmad_f32_x(pg, op1, op2, svdup_n_f32(op3))
29431}
29432#[doc = "Negated multiply-add, multiplicand first"]
29433#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"]
29434#[inline(always)]
29435#[target_feature(enable = "sve")]
29436#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29437#[cfg_attr(test, assert_instr(fnmad))]
29438pub fn svnmad_f32_z(
29439    pg: svbool_t,
29440    op1: svfloat32_t,
29441    op2: svfloat32_t,
29442    op3: svfloat32_t,
29443) -> svfloat32_t {
29444    svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
29445}
29446#[doc = "Negated multiply-add, multiplicand first"]
29447#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"]
29448#[inline(always)]
29449#[target_feature(enable = "sve")]
29450#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29451#[cfg_attr(test, assert_instr(fnmad))]
29452pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29453    svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3))
29454}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Merging (`_m`) form, lowered directly to the LLVM SVE `fnmad` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")]
        fn _svnmad_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svnmad_f64_m(pg.sve_into(), op1, op2, op3) }
}
29478#[doc = "Negated multiply-add, multiplicand first"]
29479#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"]
29480#[inline(always)]
29481#[target_feature(enable = "sve")]
29482#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29483#[cfg_attr(test, assert_instr(fnmad))]
29484pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
29485    svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3))
29486}
#[doc = "Negated multiply-add, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmad))]
pub fn svnmad_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // "Don't care" (`_x`) form: per the ACLE convention any value is permitted
    // in inactive lanes, so the merging form is reused as-is.
    svnmad_f64_m(pg, op1, op2, op3)
}
29501#[doc = "Negated multiply-add, multiplicand first"]
29502#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"]
29503#[inline(always)]
29504#[target_feature(enable = "sve")]
29505#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29506#[cfg_attr(test, assert_instr(fnmad))]
29507pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
29508    svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
29509}
29510#[doc = "Negated multiply-add, multiplicand first"]
29511#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"]
29512#[inline(always)]
29513#[target_feature(enable = "sve")]
29514#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29515#[cfg_attr(test, assert_instr(fnmad))]
29516pub fn svnmad_f64_z(
29517    pg: svbool_t,
29518    op1: svfloat64_t,
29519    op2: svfloat64_t,
29520    op3: svfloat64_t,
29521) -> svfloat64_t {
29522    svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
29523}
29524#[doc = "Negated multiply-add, multiplicand first"]
29525#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"]
29526#[inline(always)]
29527#[target_feature(enable = "sve")]
29528#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29529#[cfg_attr(test, assert_instr(fnmad))]
29530pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
29531    svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
29532}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Merging (`_m`) form, lowered directly to the LLVM SVE `fnmla` intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")]
        fn _svnmla_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // SAFETY: FFI to the matching LLVM intrinsic; `sve_into` converts the
    // generic predicate to the predicate type the intrinsic expects.
    unsafe { _svnmla_f32_m(pg.sve_into(), op1, op2, op3) }
}
29556#[doc = "Negated multiply-add, addend first"]
29557#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"]
29558#[inline(always)]
29559#[target_feature(enable = "sve")]
29560#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29561#[cfg_attr(test, assert_instr(fnmla))]
29562pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29563    svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
29564}
29565#[doc = "Negated multiply-add, addend first"]
29566#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"]
29567#[inline(always)]
29568#[target_feature(enable = "sve")]
29569#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29570#[cfg_attr(test, assert_instr(fnmla))]
29571pub fn svnmla_f32_x(
29572    pg: svbool_t,
29573    op1: svfloat32_t,
29574    op2: svfloat32_t,
29575    op3: svfloat32_t,
29576) -> svfloat32_t {
29577    svnmla_f32_m(pg, op1, op2, op3)
29578}
29579#[doc = "Negated multiply-add, addend first"]
29580#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"]
29581#[inline(always)]
29582#[target_feature(enable = "sve")]
29583#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29584#[cfg_attr(test, assert_instr(fnmla))]
29585pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29586    svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
29587}
29588#[doc = "Negated multiply-add, addend first"]
29589#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"]
29590#[inline(always)]
29591#[target_feature(enable = "sve")]
29592#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29593#[cfg_attr(test, assert_instr(fnmla))]
29594pub fn svnmla_f32_z(
29595    pg: svbool_t,
29596    op1: svfloat32_t,
29597    op2: svfloat32_t,
29598    op3: svfloat32_t,
29599) -> svfloat32_t {
29600    svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
29601}
29602#[doc = "Negated multiply-add, addend first"]
29603#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"]
29604#[inline(always)]
29605#[target_feature(enable = "sve")]
29606#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
29607#[cfg_attr(test, assert_instr(fnmla))]
29608pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
29609    svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
29610}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Raw binding to the LLVM intrinsic; its predicate uses the 2-lane
    // (nxv2i1) layout rather than the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")]
        fn _svnmla_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // SAFETY: argument/return types match the intrinsic declaration above;
    // `sve_into()` converts the predicate to its 2-lane form.
    unsafe { _svnmla_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // `_n_` form: broadcast the scalar operand and defer to the vector form.
    svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_x` (don't-care) form: inactive lanes may hold any value, so
    // delegating to the merging implementation is valid.
    svnmla_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the don't-care vector form.
    svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_z` form: zero the inactive lanes of `op1` before merging, so
    // inactive result lanes are zero.
    svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-add, addend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmla))]
pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the zeroing vector form.
    svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Raw binding to the LLVM intrinsic; its predicate uses the 4-lane
    // (nxv4i1) layout rather than the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")]
        fn _svnmls_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // SAFETY: argument/return types match the intrinsic declaration above;
    // `sve_into()` converts the predicate to its 4-lane form.
    unsafe { _svnmls_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // `_n_` form: broadcast the scalar operand and defer to the vector form.
    svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // `_x` (don't-care) form: inactive lanes may hold any value, so
    // delegating to the merging implementation is valid.
    svnmls_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then use the don't-care vector form.
    svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // `_z` form: zero the inactive lanes of `op1` before merging, so
    // inactive result lanes are zero.
    svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then use the zeroing vector form.
    svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Raw binding to the LLVM intrinsic; its predicate uses the 2-lane
    // (nxv2i1) layout rather than the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")]
        fn _svnmls_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // SAFETY: argument/return types match the intrinsic declaration above;
    // `sve_into()` converts the predicate to its 2-lane form.
    unsafe { _svnmls_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // `_n_` form: broadcast the scalar operand and defer to the vector form.
    svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_x` (don't-care) form: inactive lanes may hold any value, so
    // delegating to the merging implementation is valid.
    svnmls_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the don't-care vector form.
    svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_z` form: zero the inactive lanes of `op1` before merging, so
    // inactive result lanes are zero.
    svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, minuend first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmls))]
pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the zeroing vector form.
    svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_m(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // Raw binding to the LLVM intrinsic; its predicate uses the 4-lane
    // (nxv4i1) layout rather than the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")]
        fn _svnmsb_f32_m(
            pg: svbool4_t,
            op1: svfloat32_t,
            op2: svfloat32_t,
            op3: svfloat32_t,
        ) -> svfloat32_t;
    }
    // SAFETY: argument/return types match the intrinsic declaration above;
    // `sve_into()` converts the predicate to its 4-lane form.
    unsafe { _svnmsb_f32_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // `_n_` form: broadcast the scalar operand and defer to the vector form.
    svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_x(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // `_x` (don't-care) form: inactive lanes may hold any value, so
    // delegating to the merging implementation is valid.
    svnmsb_f32_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then use the don't-care vector form.
    svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f32_z(
    pg: svbool_t,
    op1: svfloat32_t,
    op2: svfloat32_t,
    op3: svfloat32_t,
) -> svfloat32_t {
    // `_z` form: zero the inactive lanes of `op1` before merging, so
    // inactive result lanes are zero.
    svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
    // Broadcast the scalar operand, then use the zeroing vector form.
    svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f64_m(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // Raw binding to the LLVM intrinsic; its predicate uses the 2-lane
    // (nxv2i1) layout rather than the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")]
        fn _svnmsb_f64_m(
            pg: svbool2_t,
            op1: svfloat64_t,
            op2: svfloat64_t,
            op3: svfloat64_t,
        ) -> svfloat64_t;
    }
    // SAFETY: argument/return types match the intrinsic declaration above;
    // `sve_into()` converts the predicate to its 2-lane form.
    unsafe { _svnmsb_f64_m(pg.sve_into(), op1, op2, op3) }
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // `_n_` form: broadcast the scalar operand and defer to the vector form.
    svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f64_x(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_x` (don't-care) form: inactive lanes may hold any value, so
    // delegating to the merging implementation is valid.
    svnmsb_f64_m(pg, op1, op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the don't-care vector form.
    svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_f64_z(
    pg: svbool_t,
    op1: svfloat64_t,
    op2: svfloat64_t,
    op3: svfloat64_t,
) -> svfloat64_t {
    // `_z` form: zero the inactive lanes of `op1` before merging, so
    // inactive result lanes are zero.
    svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
}
#[doc = "Negated multiply-subtract, multiplicand first"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fnmsb))]
pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
    // Broadcast the scalar operand, then use the zeroing vector form.
    svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3))
}
#[doc = "Bitwise NOR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(nor))]
pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")]
        fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // SAFETY: predicates are full 16-lane (nxv16i1) values, matching
    // `svbool_t` directly, so no predicate conversion is needed here.
    unsafe { _svnor_b_z(pg, op1, op2) }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
    // `op EOR pg` flips exactly the lanes where `pg` is true; the zeroing
    // EOR clears the inactive lanes, yielding predicate NOT under `pg`.
    sveor_b_z(pg, op, pg)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")]
        fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: byte elements use the full 16-lane predicate, so `svbool_t`
    // matches nxv16i1 directly and no predicate conversion is needed.
    unsafe { _svnot_s8_m(inactive, pg, op) }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_s8_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")]
        fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: types match the intrinsic declaration; `sve_into()` converts
    // the predicate to its 8-lane (nxv8i1) form.
    unsafe { _svnot_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_s16_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")]
        fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: types match the intrinsic declaration; `sve_into()` converts
    // the predicate to its 4-lane (nxv4i1) form.
    unsafe { _svnot_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_s32_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")]
        fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: types match the intrinsic declaration; `sve_into()` converts
    // the predicate to its 2-lane (nxv2i1) form.
    unsafe { _svnot_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_s64_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Bitwise NOT is sign-agnostic: reinterpret to the signed vector type
    // (bit-for-bit), reuse the signed implementation, and cast back.
    unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_u8_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Bitwise NOT is sign-agnostic: reinterpret to the signed vector type
    // (bit-for-bit), reuse the signed implementation, and cast back.
    unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_u16_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Bitwise NOT is sign-agnostic: reinterpret to the signed vector type
    // (bit-for-bit), reuse the signed implementation, and cast back.
    unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Don't-care form: reuse `op` itself as the inactive-lane source.
    svnot_u32_m(op, pg, op)
}
#[doc = "Bitwise invert"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(not))]
pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Zeroing form: inactive lanes come from a zero vector.
    svnot_u32_m(svdup_n_u32(0), pg, op)
}
30228#[doc = "Bitwise invert"]
30229#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"]
30230#[inline(always)]
30231#[target_feature(enable = "sve")]
30232#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
30233#[cfg_attr(test, assert_instr(not))]
30234pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
30235    unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
30236}
30237#[doc = "Bitwise invert"]
30238#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"]
30239#[inline(always)]
30240#[target_feature(enable = "sve")]
30241#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
30242#[cfg_attr(test, assert_instr(not))]
30243pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
30244    svnot_u64_m(op, pg, op)
30245}
30246#[doc = "Bitwise invert"]
30247#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"]
30248#[inline(always)]
30249#[target_feature(enable = "sve")]
30250#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
30251#[cfg_attr(test, assert_instr(not))]
30252pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
30253    svnot_u64_m(svdup_n_u64(0), pg, op)
30254}
30255#[doc = "Bitwise inclusive OR, inverting second argument"]
30256#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"]
30257#[inline(always)]
30258#[target_feature(enable = "sve")]
30259#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
30260#[cfg_attr(test, assert_instr(orn))]
30261pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
30262    unsafe extern "unadjusted" {
30263        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nvx16i1")]
30264        fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
30265    }
30266    unsafe { _svorn_b_z(pg, op1, op2) }
30267}
30268#[doc = "Bitwise inclusive OR"]
30269#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"]
30270#[inline(always)]
30271#[target_feature(enable = "sve")]
30272#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
30273#[cfg_attr(test, assert_instr(orr))]
30274pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
30275    unsafe extern "unadjusted" {
30276        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nvx16i1")]
30277        fn _svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
30278    }
30279    unsafe { _svorr_b_z(pg, op1, op2) }
30280}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        // 16 x i8 lanes: the governing predicate is already nxv16i1
        // (`svbool_t`), so no predicate-width conversion is needed here.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")]
        fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: direct call into the predicated ORR intrinsic.
    unsafe { _svorr_s8_m(pg, op1, op2) }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Vector-scalar (_n) form: splat the scalar, then reuse the
    // vector-vector path.
    svorr_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice for the
    // unspecified inactive lanes.
    svorr_s8_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Zeroing (_z) form: zero the inactive lanes of `op1` first so the
    // merging intrinsic leaves inactive result lanes as 0.
    svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")]
        fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve_into` converts the 16-lane predicate to the 8-lane
    // (nxv8i1) predicate the 16-bit-element intrinsic expects.
    unsafe { _svorr_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_s16_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")]
        fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve_into` converts the 16-lane predicate to the 4-lane
    // (nxv4i1) predicate the 32-bit-element intrinsic expects.
    unsafe { _svorr_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_s32_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")]
        fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve_into` converts the 16-lane predicate to the 2-lane
    // (nxv2i1) predicate the 64-bit-element intrinsic expects.
    unsafe { _svorr_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_s64_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // ORR is bitwise, so reinterpreting unsigned lanes as signed and
    // delegating to the signed wrapper is lossless.
    unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_u8_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // ORR is bitwise; signed/unsigned lane reinterpretation is lossless.
    unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_u16_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // ORR is bitwise; signed/unsigned lane reinterpretation is lossless.
    unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_u32_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // ORR is bitwise; signed/unsigned lane reinterpretation is lossless.
    unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Vector-scalar (_n) form: splat the scalar, then delegate.
    svorr_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // "Don't care" (_x) form: merging behaviour is a valid choice.
    svorr_u64_m(pg, op1, op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Zeroing (_z) form: zero inactive lanes of `op1` before merging.
    svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Bitwise inclusive OR"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orr))]
pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // Splat-and-delegate, as in the other _n forms.
    svorr_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 {
    unsafe extern "unadjusted" {
        // 8-bit elements: the predicate is already nxv16i1 (`svbool_t`),
        // so no conversion is needed.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")]
        fn _svorv_s8(pg: svbool_t, op: svint8_t) -> i8;
    }
    // SAFETY: direct call into the predicated ORV reduction intrinsic.
    unsafe { _svorv_s8(pg, op) }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")]
        fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
    }
    // SAFETY: `sve_into` narrows the predicate to the 8-lane (nxv8i1)
    // width the 16-bit-element intrinsic expects.
    unsafe { _svorv_s16(pg.sve_into(), op) }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")]
        fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
    }
    // SAFETY: predicate narrowed to the 4-lane (nxv4i1) width.
    unsafe { _svorv_s32(pg.sve_into(), op) }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")]
        fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
    }
    // SAFETY: predicate narrowed to the 2-lane (nxv2i1) width.
    unsafe { _svorv_s64(pg.sve_into(), op) }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
    // OR-reduction is bitwise; reinterpreting unsigned lanes as signed and
    // casting the scalar result back is lossless.
    unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
    // Lossless signed/unsigned reinterpretation around the signed wrapper.
    unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
    // Lossless signed/unsigned reinterpretation around the signed wrapper.
    unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Bitwise inclusive OR reduction to scalar"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(orv))]
pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
    // Lossless signed/unsigned reinterpretation around the signed wrapper.
    unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() }
}
#[doc = "Set all predicate elements to false"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svpfalse_b() -> svbool_t {
    // Builds an all-false predicate by broadcasting a quadword of sixteen
    // `false` lanes rather than calling a dedicated intrinsic (note: no
    // `assert_instr` attribute on this function, unlike its neighbours).
    svdupq_n_b8(
        false, false, false, false, false, false, false, false, false, false, false, false, false,
        false, false, false,
    )
}
#[doc = "Set the first active predicate element to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pfirst))]
pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")]
        fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    // SAFETY: the intrinsic operates on byte-granularity (nxv16i1)
    // predicates, matching svbool_t directly, so no conversion is required;
    // the `sve` target feature is guaranteed by the attribute above.
    unsafe { _svpfirst_b(pg, op) }
}
#[doc = "Find next active predicate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pnext))]
pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")]
        fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t;
    }
    // SAFETY: the b8 form uses byte-granularity (nxv16i1) predicates, which
    // is exactly svbool_t, so both operands pass through unchanged.
    unsafe { _svpnext_b8(pg, op) }
}
#[doc = "Find next active predicate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pnext))]
pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")]
        fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t;
    }
    // SAFETY: the b16 form operates on halfword-granularity (nxv8i1)
    // predicates; `sve_into` converts the byte-granule inputs down and the
    // result back up to svbool_t.
    unsafe { _svpnext_b16(pg.sve_into(), op.sve_into()).sve_into() }
}
#[doc = "Find next active predicate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pnext))]
pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")]
        fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t;
    }
    // SAFETY: the b32 form operates on word-granularity (nxv4i1) predicates;
    // `sve_into` converts the byte-granule inputs down and the result back
    // up to svbool_t.
    unsafe { _svpnext_b32(pg.sve_into(), op.sve_into()).sve_into() }
}
#[doc = "Find next active predicate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(pnext))]
pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")]
        fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t;
    }
    // SAFETY: the b64 form operates on doubleword-granularity (nxv2i1)
    // predicates; `sve_into` converts the byte-granule inputs down and the
    // result back up to svbool_t.
    unsafe { _svpnext_b64(pg.sve_into(), op.sve_into()).sve_into() }
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfb<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")]
        fn _svprfb(pg: svbool_t, base: *const crate::ffi::c_void, op: svprfop);
    }
    // Byte prefetch uses the byte-granule (nxv16i1) predicate, so `pg` is
    // passed through unchanged; the element type `T` is erased to a void
    // pointer since only an address is needed. Address validity for active
    // lanes is the caller's obligation (see `## Safety` above).
    _svprfb(pg, base as *const crate::ffi::c_void, OP)
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfh<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")]
        fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop);
    }
    // Halfword prefetch: narrow the predicate to halfword granularity
    // (nxv8i1) via `sve_into` and erase `T` to a void pointer. Address
    // validity for active lanes is the caller's obligation (see `## Safety`).
    _svprfh(pg.sve_into(), base as *const crate::ffi::c_void, OP)
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfw<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")]
        fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop);
    }
    // Word prefetch: narrow the predicate to word granularity (nxv4i1) via
    // `sve_into` and erase `T` to a void pointer. Address validity for
    // active lanes is the caller's obligation (see `## Safety` above).
    _svprfw(pg.sve_into(), base as *const crate::ffi::c_void, OP)
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfd<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")]
        fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop);
    }
    // Doubleword prefetch: narrow the predicate to doubleword granularity
    // (nxv2i1) via `sve_into` and erase `T` to a void pointer. Address
    // validity for active lanes is the caller's obligation (see `## Safety`).
    _svprfd(pg.sve_into(), base as *const crate::ffi::c_void, OP)
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfb_gather_s32offset<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    offsets: svint32_t,
) {
    unsafe extern "unadjusted" {
        // Note: byte prefetch has scale 1, so the `.index` intrinsic form
        // and the byte-offset form coincide here — TODO(review): confirm
        // against the generator spec.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32"
        )]
        fn _svprfb_gather_s32offset(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            offsets: svint32_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 32-bit-element granularity (nxv4i1) and erase
    // `T` to a void pointer; the `sxtw` form sign-extends each 32-bit
    // offset. Address validity per active lane is the caller's obligation.
    _svprfb_gather_s32offset(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        offsets,
        OP,
    )
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfh_gather_s32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32"
        )]
        fn _svprfh_gather_s32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 32-bit-element granularity (nxv4i1) and erase
    // `T` to a void pointer; the `sxtw.index` form sign-extends each 32-bit
    // index. Address validity per active lane is the caller's obligation.
    _svprfh_gather_s32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfw_gather_s32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32"
        )]
        fn _svprfw_gather_s32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 32-bit-element granularity (nxv4i1) and erase
    // `T` to a void pointer; the `sxtw.index` form sign-extends each 32-bit
    // index. Address validity per active lane is the caller's obligation.
    _svprfw_gather_s32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfd_gather_s32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32"
        )]
        fn _svprfd_gather_s32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 32-bit-element granularity (nxv4i1) and erase
    // `T` to a void pointer; the `sxtw.index` form sign-extends each 32-bit
    // index. Address validity per active lane is the caller's obligation.
    _svprfd_gather_s32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfb_gather_s64offset<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    offsets: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Note: byte prefetch has scale 1, so the `.index` intrinsic form
        // and the byte-offset form coincide here — TODO(review): confirm
        // against the generator spec.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64"
        )]
        fn _svprfb_gather_s64offset(
            pg: svbool2_t,
            base: *const crate::ffi::c_void,
            offsets: svint64_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 64-bit-element granularity (nxv2i1) and erase
    // `T` to a void pointer. Address validity per active lane is the
    // caller's obligation (see `## Safety` above).
    _svprfb_gather_s64offset(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        offsets,
        OP,
    )
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfh_gather_s64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64"
        )]
        fn _svprfh_gather_s64index(
            pg: svbool2_t,
            base: *const crate::ffi::c_void,
            indices: svint64_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 64-bit-element granularity (nxv2i1) and erase
    // `T` to a void pointer; indices are used as-is since they are already
    // 64-bit. Address validity per active lane is the caller's obligation.
    _svprfh_gather_s64index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfw_gather_s64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64"
        )]
        fn _svprfw_gather_s64index(
            pg: svbool2_t,
            base: *const crate::ffi::c_void,
            indices: svint64_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 64-bit-element granularity (nxv2i1) and erase
    // `T` to a void pointer; indices are used as-is since they are already
    // 64-bit. Address validity per active lane is the caller's obligation.
    _svprfw_gather_s64index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfd_gather_s64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svint64_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64"
        )]
        fn _svprfd_gather_s64index(
            pg: svbool2_t,
            base: *const crate::ffi::c_void,
            indices: svint64_t,
            op: svprfop,
        );
    }
    // Narrow the predicate to 64-bit-element granularity (nxv2i1) and erase
    // `T` to a void pointer; indices are used as-is since they are already
    // 64-bit. Address validity per active lane is the caller's obligation.
    _svprfd_gather_s64index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices,
        OP,
    )
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfb_gather_u32offset<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    offsets: svuint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32"
        )]
        fn _svprfb_gather_u32offset(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            offsets: svint32_t,
            op: svprfop,
        );
    }
    // The `uxtw` form zero-extends each 32-bit offset, so the unsigned
    // offsets are only bit-reinterpreted (`as_signed`) to satisfy the FFI
    // signature; the predicate is narrowed to nxv4i1. Address validity per
    // active lane is the caller's obligation (see `## Safety` above).
    _svprfb_gather_u32offset(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        offsets.as_signed(),
        OP,
    )
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfh_gather_u32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32"
        )]
        fn _svprfh_gather_u32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // The `uxtw.index` form zero-extends each 32-bit index, so the unsigned
    // indices are only bit-reinterpreted (`as_signed`) to satisfy the FFI
    // signature; the predicate is narrowed to nxv4i1. Address validity per
    // active lane is the caller's obligation (see `## Safety` above).
    _svprfh_gather_u32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices.as_signed(),
        OP,
    )
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfw_gather_u32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32"
        )]
        fn _svprfw_gather_u32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // The `uxtw.index` form zero-extends each 32-bit index, so the unsigned
    // indices are only bit-reinterpreted (`as_signed`) to satisfy the FFI
    // signature; the predicate is narrowed to nxv4i1. Address validity per
    // active lane is the caller's obligation (see `## Safety` above).
    _svprfw_gather_u32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices.as_signed(),
        OP,
    )
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfd_gather_u32index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint32_t,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32"
        )]
        fn _svprfd_gather_u32index(
            pg: svbool4_t,
            base: *const crate::ffi::c_void,
            indices: svint32_t,
            op: svprfop,
        );
    }
    // The `uxtw.index` form zero-extends each 32-bit index, so the unsigned
    // indices are only bit-reinterpreted (`as_signed`) to satisfy the FFI
    // signature; the predicate is narrowed to nxv4i1. Address validity per
    // active lane is the caller's obligation (see `## Safety` above).
    _svprfd_gather_u32index(
        pg.sve_into(),
        base as *const crate::ffi::c_void,
        indices.as_signed(),
        OP,
    )
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfb_gather_u64offset<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    offsets: svuint64_t,
) {
    // 64-bit unsigned and signed offsets share the same bit patterns at the
    // ISA level, so delegate to the signed variant after reinterpreting.
    svprfb_gather_s64offset::<OP, T>(pg, base, offsets.as_signed())
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfh_gather_u64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint64_t,
) {
    // 64-bit unsigned and signed indices share the same bit patterns at the
    // ISA level, so delegate to the signed variant after reinterpreting.
    svprfh_gather_s64index::<OP, T>(pg, base, indices.as_signed())
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfw_gather_u64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint64_t,
) {
    // 64-bit unsigned and signed indices share the same bit patterns at the
    // ISA level, so delegate to the signed variant after reinterpreting.
    svprfw_gather_s64index::<OP, T>(pg, base, indices.as_signed())
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
pub unsafe fn svprfd_gather_u64index<const OP: svprfop, T>(
    pg: svbool_t,
    base: *const T,
    indices: svuint64_t,
) {
    // 64-bit unsigned and signed indices share the same bit patterns at the
    // ISA level, so delegate to the signed variant after reinterpreting.
    svprfd_gather_s64index::<OP, T>(pg, base, indices.as_signed())
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
pub unsafe fn svprfb_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // `bases` already holds complete per-lane addresses, so the
    // scalar-offset intrinsic is invoked with a zero offset; `as_signed`
    // only reinterprets bits for the FFI signature, and the predicate is
    // narrowed to nxv4i1.
    _svprfb_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))]
pub unsafe fn svprfh_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // `bases` already holds complete per-lane addresses, so the
    // scalar-offset intrinsic is invoked with a zero offset; `as_signed`
    // only reinterprets bits for the FFI signature, and the predicate is
    // narrowed to nxv4i1.
    _svprfh_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))]
pub unsafe fn svprfw_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // `bases` already holds complete per-lane addresses, so the
    // scalar-offset intrinsic is invoked with a zero offset; `as_signed`
    // only reinterprets bits for the FFI signature, and the predicate is
    // narrowed to nxv4i1.
    _svprfw_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))]
pub unsafe fn svprfd_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // `bases` already holds complete per-lane addresses, so the
    // scalar-offset intrinsic is invoked with a zero offset; `as_signed`
    // only reinterprets bits for the FFI signature, and the predicate is
    // narrowed to nxv4i1.
    _svprfd_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
pub unsafe fn svprfb_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // `bases` already holds complete per-lane addresses, so the
    // scalar-offset intrinsic is invoked with a zero offset; `as_signed`
    // only reinterprets bits for the FFI signature, and the predicate is
    // narrowed to nxv2i1.
    _svprfb_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector form over 64-bit addresses, zero byte offset.
pub unsafe fn svprfh_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // sve_into(): 2-lane predicate; as_signed(): u64 -> i64 lane reinterpretation.
    _svprfh_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector form over 64-bit addresses, zero byte offset.
pub unsafe fn svprfw_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // sve_into(): 2-lane predicate; as_signed(): u64 -> i64 lane reinterpretation.
    _svprfw_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector form over 64-bit addresses, zero byte offset.
pub unsafe fn svprfd_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // sve_into(): 2-lane predicate; as_signed(): u64 -> i64 lane reinterpretation.
    _svprfd_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP)
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector + scalar byte offset: `offset` is added, unscaled, to each active
// lane's address (byte prefetch — offset units and byte units coincide).
pub unsafe fn svprfb_gather_u32base_offset<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop);
    }
    _svprfb_gather_u32base_offset(pg.sve_into(), bases.as_signed(), offset, OP)
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): the asserted mnemonic is `prfb` although this is a halfword
// prefetch; the pre-scaled byte offset below plausibly makes LLVM select the
// byte-granular encoding — confirm against the stdarch-gen-arm spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector + element index: the index is converted to a byte offset before
// calling the scalar-offset intrinsic.
pub unsafe fn svprfh_gather_u32base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // << 1: scale halfword index to bytes (x2); unchecked_shl — overflow is the
    // caller's responsibility (the fn is already `unsafe`).
    _svprfh_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP)
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): asserted mnemonic is `prfb` for a word prefetch — likely because
// the pre-scaled byte offset lowers via the byte-granular form; confirm vs spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector + element index, converted to a byte offset before the call.
pub unsafe fn svprfw_gather_u32base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // << 2: scale word index to bytes (x4); overflow is the caller's problem.
    _svprfw_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP)
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): asserted mnemonic is `prfb` for a doubleword prefetch — likely
// because the pre-scaled byte offset lowers via the byte form; confirm vs spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector + element index, converted to a byte offset before the call.
pub unsafe fn svprfd_gather_u32base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32"
        )]
        fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
    }
    // << 3: scale doubleword index to bytes (x8); overflow is the caller's problem.
    _svprfd_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP)
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector (64-bit lanes) + scalar byte offset, passed through unscaled.
pub unsafe fn svprfb_gather_u64base_offset<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop);
    }
    _svprfb_gather_u64base_offset(pg.sve_into(), bases.as_signed(), offset, OP)
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): asserted mnemonic is `prfb` for a halfword prefetch — likely
// because the pre-scaled byte offset lowers via the byte form; confirm vs spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector (64-bit lanes) + element index, converted to bytes before the call.
pub unsafe fn svprfh_gather_u64base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // << 1: halfword index -> byte offset (x2); overflow is the caller's problem.
    _svprfh_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP)
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): asserted mnemonic is `prfb` for a word prefetch — likely because
// the pre-scaled byte offset lowers via the byte form; confirm vs spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector (64-bit lanes) + element index, converted to bytes before the call.
pub unsafe fn svprfw_gather_u64base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // << 2: word index -> byte offset (x4); overflow is the caller's problem.
    _svprfw_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP)
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// NOTE(review): asserted mnemonic is `prfb` for a doubleword prefetch — likely
// because the pre-scaled byte offset lowers via the byte form; confirm vs spec.
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
// Base-vector (64-bit lanes) + element index, converted to bytes before the call.
pub unsafe fn svprfd_gather_u64base_index<const OP: svprfop>(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
) {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64"
        )]
        fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
    }
    // << 3: doubleword index -> byte offset (x8); overflow is the caller's problem.
    _svprfd_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP)
}
#[doc = "Prefetch bytes"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
// Advances `base` by `svcntb() * vnum` elements of `T` (vnum whole vectors when
// T is byte-sized — assumption inherited from the ACLE contract; pointer offset
// is in units of T) and defers to the plain contiguous prefetch.
pub unsafe fn svprfb_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
    svprfb::<OP, _>(pg, base.offset(svcntb() as isize * vnum as isize))
}
#[doc = "Prefetch halfwords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
// Advances `base` by `svcnth() * vnum` elements of `T` (vnum whole vectors when
// T is halfword-sized — assumes T matches the prefetch granule; TODO confirm)
// and defers to the contiguous halfword prefetch.
pub unsafe fn svprfh_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
    svprfh::<OP, _>(pg, base.offset(svcnth() as isize * vnum as isize))
}
#[doc = "Prefetch words"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
// Advances `base` by `svcntw() * vnum` elements of `T` (vnum whole vectors when
// T is word-sized — assumes T matches the prefetch granule; TODO confirm) and
// defers to the contiguous word prefetch.
pub unsafe fn svprfw_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
    svprfw::<OP, _>(pg, base.offset(svcntw() as isize * vnum as isize))
}
#[doc = "Prefetch doublewords"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
// Advances `base` by `svcntd() * vnum` elements of `T` (vnum whole vectors when
// T is doubleword-sized — assumes T matches the prefetch granule; TODO confirm)
// and defers to the contiguous doubleword prefetch.
pub unsafe fn svprfd_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
    svprfd::<OP, _>(pg, base.offset(svcntd() as isize * vnum as isize))
}
#[doc = "Test whether any active element is true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptest))]
// Returns true iff any element of `op` that is active in `pg` is set.
// Full-width (nxv16i1) predicates — no conversion needed.
pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ptest.any.nxv16i1"
        )]
        fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool;
    }
    unsafe { _svptest_any(pg, op) }
}
#[doc = "Test whether first active element is true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptest))]
// Returns true iff the first element of `op` that is active in `pg` is set.
pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ptest.first.nxv16i1"
        )]
        fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool;
    }
    unsafe { _svptest_first(pg, op) }
}
#[doc = "Test whether last active element is true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptest))]
// Returns true iff the last element of `op` that is active in `pg` is set.
pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ptest.last.nxv16i1"
        )]
        fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool;
    }
    unsafe { _svptest_last(pg, op) }
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptrue))]
// Convenience form: the `_pat` variant with the all-elements pattern.
pub fn svptrue_b8() -> svbool_t {
    svptrue_pat_b8::<{ svpattern::SV_ALL }>()
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptrue))]
// Convenience form: the `_pat` variant with the all-elements pattern.
pub fn svptrue_b16() -> svbool_t {
    svptrue_pat_b16::<{ svpattern::SV_ALL }>()
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptrue))]
// Convenience form: the `_pat` variant with the all-elements pattern.
pub fn svptrue_b32() -> svbool_t {
    svptrue_pat_b32::<{ svpattern::SV_ALL }>()
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ptrue))]
// Convenience form: the `_pat` variant with the all-elements pattern.
pub fn svptrue_b64() -> svbool_t {
    svptrue_pat_b64::<{ svpattern::SV_ALL }>()
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
// Byte-granularity PTRUE: the nxv16i1 intrinsic already returns the full-width
// predicate type, so no conversion is needed (unlike the b16/b32/b64 variants).
pub fn svptrue_pat_b8<const PATTERN: svpattern>() -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")]
        fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t;
    }
    unsafe { _svptrue_pat_b8(PATTERN) }
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
// Halfword-granularity PTRUE: the intrinsic yields an 8-lane predicate, widened
// to the general `svbool_t` via `sve_into()`.
pub fn svptrue_pat_b16<const PATTERN: svpattern>() -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")]
        fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t;
    }
    unsafe { _svptrue_pat_b16(PATTERN).sve_into() }
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
// Word-granularity PTRUE: 4-lane predicate widened to `svbool_t` via `sve_into()`.
pub fn svptrue_pat_b32<const PATTERN: svpattern>() -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")]
        fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t;
    }
    unsafe { _svptrue_pat_b32(PATTERN).sve_into() }
}
#[doc = "Set predicate elements to true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
// Doubleword-granularity PTRUE: 2-lane predicate widened via `sve_into()`.
pub fn svptrue_pat_b64<const PATTERN: svpattern>() -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")]
        fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t;
    }
    unsafe { _svptrue_pat_b64(PATTERN).sve_into() }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Unpredicated signed saturating add (the `.x` LLVM variant takes no predicate).
pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8"
        )]
        fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqadd_s8(op1, op2) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Vector-scalar form: splat `op2` across a vector, then reuse the vector form.
pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    svqadd_s8(op1, svdup_n_s8(op2))
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Unpredicated signed saturating add, 16-bit lanes.
pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16"
        )]
        fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqadd_s16(op1, op2) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Vector-scalar form: splat `op2`, then reuse the vector form.
pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    svqadd_s16(op1, svdup_n_s16(op2))
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Unpredicated signed saturating add, 32-bit lanes.
pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32"
        )]
        fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svqadd_s32(op1, op2) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Vector-scalar form: splat `op2`, then reuse the vector form.
pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    svqadd_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Unpredicated signed saturating add, 64-bit lanes.
pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64"
        )]
        fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svqadd_s64(op1, op2) }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqadd))]
// Vector-scalar form: splat `op2`, then reuse the vector form.
pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    svqadd_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
// Unpredicated unsigned saturating add. The LLVM intrinsic is declared over
// signed lane types, so the lanes are reinterpreted in and back out.
pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8"
        )]
        fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
// Vector-scalar form: splat `op2`, then reuse the vector form.
pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    svqadd_u8(op1, svdup_n_u8(op2))
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
// Unpredicated unsigned saturating add, 16-bit lanes; signed reinterpret at the
// FFI boundary, converted back on return.
pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16"
        )]
        fn _svqadd_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating add"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqadd))]
// Vector-scalar form: splat `op2`, then reuse the vector form.
pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    svqadd_u16(op1, svdup_n_u16(op2))
}
32063#[doc = "Saturating add"]
32064#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"]
32065#[inline(always)]
32066#[target_feature(enable = "sve")]
32067#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32068#[cfg_attr(test, assert_instr(uqadd))]
32069pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
32070    unsafe extern "unadjusted" {
32071        #[cfg_attr(
32072            target_arch = "aarch64",
32073            link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32"
32074        )]
32075        fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
32076    }
32077    unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
32078}
32079#[doc = "Saturating add"]
32080#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"]
32081#[inline(always)]
32082#[target_feature(enable = "sve")]
32083#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32084#[cfg_attr(test, assert_instr(uqadd))]
32085pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
32086    svqadd_u32(op1, svdup_n_u32(op2))
32087}
32088#[doc = "Saturating add"]
32089#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"]
32090#[inline(always)]
32091#[target_feature(enable = "sve")]
32092#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32093#[cfg_attr(test, assert_instr(uqadd))]
32094pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
32095    unsafe extern "unadjusted" {
32096        #[cfg_attr(
32097            target_arch = "aarch64",
32098            link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64"
32099        )]
32100        fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
32101    }
32102    unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
32103}
32104#[doc = "Saturating add"]
32105#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"]
32106#[inline(always)]
32107#[target_feature(enable = "sve")]
32108#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32109#[cfg_attr(test, assert_instr(uqadd))]
32110pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
32111    svqadd_u64(op1, svdup_n_u64(op2))
32112}
// The `svqdec{b,h,w,d}_n_*` functions below are thin wrappers: each one
// delegates to its `_pat` counterpart, fixing the vector pattern to
// `svpattern::SV_ALL` while forwarding the `IMM_FACTOR` multiplier.
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))]
pub fn svqdecb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
pub fn svqdech_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
pub fn svqdecw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
pub fn svqdecd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))]
pub fn svqdecb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
pub fn svqdech_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
pub fn svqdecw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
pub fn svqdecd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))]
pub fn svqdecb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
pub fn svqdech_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
pub fn svqdecw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
pub fn svqdecd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))]
pub fn svqdecb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
pub fn svqdech_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
pub fn svqdecw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
pub fn svqdecd_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
// The `svqdec{b,h,w,d}_pat_n_*` functions below take a scalar counter, a
// `PATTERN` const generic selecting the element-count pattern, and an
// `IMM_FACTOR` multiplier (compile-time checked to 1..=16). Each binds and
// calls the corresponding LLVM SVE intrinsic; the unsigned variants
// additionally reinterpret the operand/result via as_signed/as_unsigned
// because the LLVM declarations use signed integer types.
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")]
        fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: the `sve` target feature required by the intrinsic is enabled
    // on this function via #[target_feature].
    unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")]
        fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")]
        fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")]
        fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")]
        fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")]
        fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")]
        fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")]
        fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    // LLVM declares the intrinsic over signed integers; reinterpret in/out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")]
        fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")]
        fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")]
        fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")]
        fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")]
        fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")]
        fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")]
        fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")]
        fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
// Vector forms of the pattern-based saturating decrement: these operate on a
// whole SVE vector rather than a scalar counter, again parameterized by the
// `PATTERN` const generic and a 1..=16 `IMM_FACTOR` multiplier.
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint16_t,
) -> svint16_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")]
        fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint32_t,
) -> svint32_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")]
        fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint64_t,
) -> svint64_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")]
        fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdech_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint16_t,
) -> svuint16_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    // LLVM declares the intrinsic over signed vectors; reinterpret in/out.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")]
        fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint32_t,
) -> svuint32_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")]
        fn _svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqdecd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint64_t,
) -> svuint64_t {
    // Compile-time check: the multiplier immediate must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")]
        fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
    }
    // SAFETY: `sve` is enabled on this function via #[target_feature].
    unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
32577#[doc = "Saturating decrement by number of halfword elements"]
32578#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"]
32579#[inline(always)]
32580#[target_feature(enable = "sve")]
32581#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32582#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
32583pub fn svqdech_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t {
32584    svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
32585}
32586#[doc = "Saturating decrement by number of word elements"]
32587#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"]
32588#[inline(always)]
32589#[target_feature(enable = "sve")]
32590#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
32591#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
32592pub fn svqdecw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t {
32593    svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
32594}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern, i.e. counts every doubleword element the vector can hold.
pub fn svqdecd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t {
    svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all halfword elements).
pub fn svqdech_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t {
    svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all word elements).
pub fn svqdecw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t {
    svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all doubleword elements).
pub fn svqdecd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t {
    svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active byte lanes in `pg`, saturating
// to the i32 range.
pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1"
        )]
        fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32;
    }
    // The b8 form needs the full 16-lane predicate, which is exactly
    // `svbool_t`, so no predicate conversion is required here.
    unsafe { _svqdecp_n_s32_b8(op, pg) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active halfword lanes in `pg`,
// saturating to the i32 range.
pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1"
        )]
        fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32;
    }
    // The intrinsic takes an 8-lane (nxv8i1) predicate; `sve_into`
    // converts the all-lanes `svbool_t` accordingly.
    unsafe { _svqdecp_n_s32_b16(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active word lanes in `pg`,
// saturating to the i32 range.
pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1"
        )]
        fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32;
    }
    // The intrinsic takes a 4-lane (nxv4i1) predicate; `sve_into`
    // converts the all-lanes `svbool_t` accordingly.
    unsafe { _svqdecp_n_s32_b32(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active doubleword lanes in `pg`,
// saturating to the i32 range.
pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1"
        )]
        fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32;
    }
    // The intrinsic takes a 2-lane (nxv2i1) predicate; `sve_into`
    // converts the all-lanes `svbool_t` accordingly.
    unsafe { _svqdecp_n_s32_b64(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active byte lanes in `pg`, saturating
// to the i64 range.
pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1"
        )]
        fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64;
    }
    // 16-lane predicate is `svbool_t` itself — no conversion needed.
    unsafe { _svqdecp_n_s64_b8(op, pg) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active halfword lanes in `pg`,
// saturating to the i64 range.
pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1"
        )]
        fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64;
    }
    // `sve_into` converts `svbool_t` to the 8-lane predicate type.
    unsafe { _svqdecp_n_s64_b16(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active word lanes in `pg`,
// saturating to the i64 range.
pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1"
        )]
        fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64;
    }
    // `sve_into` converts `svbool_t` to the 4-lane predicate type.
    unsafe { _svqdecp_n_s64_b32(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Decrements `op` by the number of active doubleword lanes in `pg`,
// saturating to the i64 range.
pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1"
        )]
        fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64;
    }
    // `sve_into` converts `svbool_t` to the 2-lane predicate type.
    unsafe { _svqdecp_n_s64_b64(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active byte lanes in `pg`, with
// unsigned saturation (uqdecp).
pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1"
        )]
        fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32;
    }
    // The LLVM declaration is typed over i32; `as_signed`/`as_unsigned`
    // reinterpret the u32 value across the FFI boundary.
    unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active halfword lanes in `pg`, with
// unsigned saturation.
pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1"
        )]
        fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32;
    }
    // Reinterpret u32 <-> i32 for the signed-typed LLVM declaration and
    // convert the predicate to 8 lanes via `sve_into`.
    unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active word lanes in `pg`, with
// unsigned saturation.
pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1"
        )]
        fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32;
    }
    // Reinterpret u32 <-> i32 for the signed-typed LLVM declaration and
    // convert the predicate to 4 lanes via `sve_into`.
    unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active doubleword lanes in `pg`,
// with unsigned saturation.
pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1"
        )]
        fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32;
    }
    // Reinterpret u32 <-> i32 for the signed-typed LLVM declaration and
    // convert the predicate to 2 lanes via `sve_into`.
    unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active byte lanes in `pg`, with
// unsigned saturation; 16-lane predicate is `svbool_t` itself.
pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1"
        )]
        fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64;
    }
    // Reinterpret u64 <-> i64 for the signed-typed LLVM declaration.
    unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active halfword lanes in `pg`, with
// unsigned saturation.
pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1"
        )]
        fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64;
    }
    // Reinterpret u64 <-> i64 and convert the predicate to 8 lanes.
    unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active word lanes in `pg`, with
// unsigned saturation.
pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1"
        )]
        fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64;
    }
    // Reinterpret u64 <-> i64 and convert the predicate to 4 lanes.
    unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Decrements `op` by the number of active doubleword lanes in `pg`,
// with unsigned saturation.
pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1"
        )]
        fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64;
    }
    // Reinterpret u64 <-> i64 and convert the predicate to 2 lanes.
    unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Vector form: every lane of `op` is decremented by the active-lane
// count of `pg`, with signed saturation per lane.
pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")]
        fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t;
    }
    // Convert the predicate to the 8-lane form matching nxv8i16.
    unsafe { _svqdecp_s16(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Vector form: every lane of `op` is decremented by the active-lane
// count of `pg`, with signed saturation per lane.
pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")]
        fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t;
    }
    // Convert the predicate to the 4-lane form matching nxv4i32.
    unsafe { _svqdecp_s32(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqdecp))]
// Vector form: every lane of `op` is decremented by the active-lane
// count of `pg`, with signed saturation per lane.
pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")]
        fn _svqdecp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t;
    }
    // Convert the predicate to the 2-lane form matching nxv2i64.
    unsafe { _svqdecp_s64(op, pg.sve_into()) }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Vector form with unsigned saturation; the LLVM declaration is typed
// over signed vectors, so the operand is reinterpreted in and out.
pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")]
        fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t;
    }
    unsafe { _svqdecp_u16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Vector form with unsigned saturation; the LLVM declaration is typed
// over signed vectors, so the operand is reinterpreted in and out.
pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")]
        fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t;
    }
    unsafe { _svqdecp_u32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating decrement by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqdecp))]
// Vector form with unsigned saturation; the LLVM declaration is typed
// over signed vectors, so the operand is reinterpreted in and out.
pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")]
        fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t;
    }
    unsafe { _svqdecp_u64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all byte elements).
pub fn svqincb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all halfword elements).
pub fn svqinch_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all word elements).
pub fn svqincw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all doubleword elements).
pub fn svqincd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
    svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all byte elements).
pub fn svqincb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all halfword elements).
pub fn svqinch_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all word elements).
pub fn svqincw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
// Convenience form: delegates to the `_pat` variant with the SV_ALL
// pattern (count all doubleword elements).
pub fn svqincd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
    svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all byte elements).
pub fn svqincb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all halfword elements).
pub fn svqinch_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all word elements).
pub fn svqincw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all doubleword elements).
pub fn svqincd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
    svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all byte elements).
pub fn svqincb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all halfword elements).
pub fn svqinch_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all word elements).
pub fn svqincw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
// Unsigned-saturating convenience form: delegates to the `_pat` variant
// with the SV_ALL pattern (count all doubleword elements).
pub fn svqincd_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
    svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// Increments `op` by (elements matching PATTERN) * IMM_FACTOR, with
// signed saturation. The multiplier is validated at compile time.
pub fn svqincb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")]
        fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // Const generics are forwarded as (constant-folded) runtime args.
    unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// Increments `op` by (halfword elements matching PATTERN) * IMM_FACTOR,
// with signed saturation.
pub fn svqinch_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")]
        fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// Increments `op` by (word elements matching PATTERN) * IMM_FACTOR,
// with signed saturation.
pub fn svqincw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")]
        fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// Increments `op` by (doubleword elements matching PATTERN) *
// IMM_FACTOR, with signed saturation.
pub fn svqincd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")]
        fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// 64-bit scalar variant: increments `op` by (byte elements matching
// PATTERN) * IMM_FACTOR, with signed saturation.
pub fn svqincb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")]
        fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
// 64-bit scalar variant: increments `op` by (halfword elements matching
// PATTERN) * IMM_FACTOR, with signed saturation.
pub fn svqinch_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // Instruction immediate multiplier must be in 1..=16.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")]
        fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")]
        fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The const generics are forwarded as ordinary arguments to the LLVM intrinsic.
    unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")]
        fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The const generics are forwarded as ordinary arguments to the LLVM intrinsic.
    unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n32")]
        fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqinch_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")]
        fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")]
        fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")]
        fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of byte elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")]
        fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqinch_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")]
        fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")]
        fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")]
        fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqinch_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint16_t,
) -> svint16_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")]
        fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
    }
    // The const generics are forwarded as ordinary arguments to the LLVM intrinsic.
    unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint32_t,
) -> svint32_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")]
        fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
    }
    // The const generics are forwarded as ordinary arguments to the LLVM intrinsic.
    unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svint64_t,
) -> svint64_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")]
        fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
    }
    // The const generics are forwarded as ordinary arguments to the LLVM intrinsic.
    unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqinch_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint16_t,
) -> svuint16_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")]
        fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
    }
    // The intrinsic is declared over signed vectors; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed views.
    unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint32_t,
) -> svuint32_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")]
        fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
    }
    // The intrinsic is declared over signed vectors; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed views.
    unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))]
pub fn svqincd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
    op: svuint64_t,
) -> svuint64_t {
    // The immediate multiplier is restricted to [1, 16]; checked at compile time.
    static_assert_range!(IMM_FACTOR, 1..=16);
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")]
        fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
    }
    // The intrinsic is declared over signed vectors; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed views.
    unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
pub fn svqinch_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
pub fn svqincw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
pub fn svqincd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of halfword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
pub fn svqinch_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of word elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
pub fn svqincw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by number of doubleword elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
pub fn svqincd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t {
    // Convenience form of the `_pat_` variant with the pattern fixed to SV_ALL.
    svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n32.nxv16i1"
        )]
        fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32;
    }
    // The nxv16i1 intrinsic takes the generic predicate directly; no conversion needed.
    unsafe { _svqincp_n_s32_b8(op, pg) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1"
        )]
        fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s32_b16(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1"
        )]
        fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s32_b32(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1"
        )]
        fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s32_b64(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1"
        )]
        fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64;
    }
    // The nxv16i1 intrinsic takes the generic predicate directly; no conversion needed.
    unsafe { _svqincp_n_s64_b8(op, pg) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1"
        )]
        fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s64_b16(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1"
        )]
        fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s64_b32(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1"
        )]
        fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_n_s64_b64(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1"
        )]
        fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1"
        )]
        fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1"
        )]
        fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1"
        )]
        fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1"
        )]
        fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64;
    }
    // The intrinsic is declared over signed integers; `as_signed`/`as_unsigned`
    // convert between the same-width unsigned/signed representations.
    unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1"
        )]
        fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1"
        )]
        fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1"
        )]
        fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64;
    }
    // `as_signed`/`as_unsigned` bridge to the signed intrinsic signature;
    // `sve_into` converts the predicate to the width-specific type.
    unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")]
        fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_s16(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")]
        fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_s32(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqincp))]
pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")]
        fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t;
    }
    // `sve_into` converts the generic predicate to the element-width-specific
    // predicate type the intrinsic expects.
    unsafe { _svqincp_s64(op, pg.sve_into()) }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")]
        fn _svqincp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern so the
    // unsigned vector fits the intrinsic's signed-typed signature; `sve_into`
    // converts the predicate to the 8-lane form nxv8i16 expects.
    unsafe { _svqincp_u16(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")]
        fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern so the
    // unsigned vector fits the intrinsic's signed-typed signature; `sve_into`
    // converts the predicate to the 4-lane form nxv4i32 expects.
    unsafe { _svqincp_u32(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating increment by active element count"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqincp))]
pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")]
        fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern so the
    // unsigned vector fits the intrinsic's signed-typed signature; `sve_into`
    // converts the predicate to the 2-lane form nxv2i64 expects.
    unsafe { _svqincp_u64(op.as_signed(), pg.sve_into()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8"
        )]
        fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svqsub_s8(op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_s8(op1, svdup_n_s8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16"
        )]
        fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svqsub_s16(op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_s16(op1, svdup_n_s16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32"
        )]
        fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svqsub_s32(op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_s32(op1, svdup_n_s32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64"
        )]
        fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svqsub_s64(op1, op2) }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sqsub))]
pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_s64(op1, svdup_n_s64(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8"
        )]
        fn _svqsub_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern to fit
    // the intrinsic's signed-typed signature; the call matches that signature.
    unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_u8(op1, svdup_n_u8(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16"
        )]
        fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern to fit
    // the intrinsic's signed-typed signature; the call matches that signature.
    unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_u16(op1, svdup_n_u16(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32"
        )]
        fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern to fit
    // the intrinsic's signed-typed signature; the call matches that signature.
    unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_u32(op1, svdup_n_u32(op2))
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64"
        )]
        fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: `as_signed`/`as_unsigned` only reinterpret the bit pattern to fit
    // the intrinsic's signed-typed signature; the call matches that signature.
    unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Saturating subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uqsub))]
pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
    // Scalar variant: splat `op2` across a vector and reuse the vector form.
    svqsub_u64(op1, svdup_n_u64(op2))
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")]
        fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; for 8-bit
    // elements the generic svbool_t predicate is used directly (no conversion).
    unsafe { _svrbit_s8_m(inactive, pg, op) }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_s8_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_s8_m(svdup_n_s8(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")]
        fn _svrbit_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; `sve_into`
    // converts the generic predicate to the 8-lane form nxv8i16 expects.
    unsafe { _svrbit_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_s16_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")]
        fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; `sve_into`
    // converts the generic predicate to the 4-lane form nxv4i32 expects.
    unsafe { _svrbit_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_s32_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")]
        fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; `sve_into`
    // converts the generic predicate to the 2-lane form nxv2i64 expects.
    unsafe { _svrbit_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_s64_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // SAFETY: bit reversal is signedness-agnostic; `as_signed`/`as_unsigned`
    // only reinterpret the bit pattern to reuse the signed wrapper.
    unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_u8_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_u8_m(svdup_n_u8(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // SAFETY: bit reversal is signedness-agnostic; `as_signed`/`as_unsigned`
    // only reinterpret the bit pattern to reuse the signed wrapper.
    unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_u16_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // SAFETY: bit reversal is signedness-agnostic; `as_signed`/`as_unsigned`
    // only reinterpret the bit pattern to reuse the signed wrapper.
    unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_u32_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // SAFETY: bit reversal is signedness-agnostic; `as_signed`/`as_unsigned`
    // only reinterpret the bit pattern to reuse the signed wrapper.
    unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrbit_u64_m(op, pg, op)
}
#[doc = "Reverse bits"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rbit))]
pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrbit_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Read FFR, returning predicate of successfully loaded elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rdffr))]
pub fn svrdffr() -> svbool_t {
    // Unpredicated form: read the FFR through the predicated variant with an
    // all-true predicate.
    svrdffr_z(svptrue_b8())
}
#[doc = "Read FFR, returning predicate of successfully loaded elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rdffr))]
pub fn svrdffr_z(pg: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")]
        fn _svrdffr_z(pg: svbool_t) -> svbool_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; the generic
    // svbool_t predicate is passed through unchanged.
    unsafe { _svrdffr_z(pg) }
}
#[doc = "Reciprocal estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpe))]
pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32"
        )]
        fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svrecpe_f32(op) }
}
#[doc = "Reciprocal estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpe))]
pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64"
        )]
        fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svrecpe_f64(op) }
}
#[doc = "Reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecps))]
pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecps.x.nxv4f32"
        )]
        fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svrecps_f32(op1, op2) }
}
#[doc = "Reciprocal step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecps))]
pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecps.x.nxv2f64"
        )]
        fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature exactly.
    unsafe { _svrecps_f64(op1, op2) }
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32"
        )]
        fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; `sve_into`
    // converts the generic predicate to the 4-lane form nxv4f32 expects.
    unsafe { _svrecpx_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrecpx_f32_m(op, pg, op)
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrecpx_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64"
        )]
        fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the call matches the intrinsic's declared signature; `sve_into`
    // converts the generic predicate to the 2-lane form nxv2f64 expects.
    unsafe { _svrecpx_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" predication: reuse the merging form with `op` itself as the
    // inactive value, so inactive lanes keep their input bits.
    svrecpx_f64_m(op, pg, op)
}
#[doc = "Reciprocal exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frecpx))]
pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing predication: inactive lanes come from an all-zeros vector.
    svrecpx_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t {
    // SAFETY: identity reinterpretation — source and destination types are the
    // same, so the transmute is a no-op; kept for API completeness.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t {
    // SAFETY: pure bitwise reinterpretation between SVE data vector types of
    // the same register size; no bits are changed.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t {
    // SAFETY: pure bitwise reinterpretation between SVE data vector types of
    // the same register size; no bits are changed.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
34427#[doc = "Reinterpret vector contents"]
34428#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"]
34429#[inline(always)]
34430#[target_feature(enable = "sve")]
34431#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34432pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t {
34433    unsafe { crate::intrinsics::transmute_unchecked(op) }
34434}
34435#[doc = "Reinterpret vector contents"]
34436#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"]
34437#[inline(always)]
34438#[target_feature(enable = "sve")]
34439#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34440pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t {
34441    unsafe { crate::intrinsics::transmute_unchecked(op) }
34442}
34443#[doc = "Reinterpret vector contents"]
34444#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"]
34445#[inline(always)]
34446#[target_feature(enable = "sve")]
34447#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34448pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t {
34449    unsafe { crate::intrinsics::transmute_unchecked(op) }
34450}
34451#[doc = "Reinterpret vector contents"]
34452#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"]
34453#[inline(always)]
34454#[target_feature(enable = "sve")]
34455#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34456pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t {
34457    unsafe { crate::intrinsics::transmute_unchecked(op) }
34458}
34459#[doc = "Reinterpret vector contents"]
34460#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"]
34461#[inline(always)]
34462#[target_feature(enable = "sve")]
34463#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34464pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t {
34465    unsafe { crate::intrinsics::transmute_unchecked(op) }
34466}
34467#[doc = "Reinterpret vector contents"]
34468#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"]
34469#[inline(always)]
34470#[target_feature(enable = "sve")]
34471#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34472pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t {
34473    unsafe { crate::intrinsics::transmute_unchecked(op) }
34474}
34475#[doc = "Reinterpret vector contents"]
34476#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"]
34477#[inline(always)]
34478#[target_feature(enable = "sve")]
34479#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
34480pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t {
34481    unsafe { crate::intrinsics::transmute_unchecked(op) }
34482}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t {
    // SAFETY: bitwise reinterpret between SVE vector types sharing one register layout.
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
35035#[doc = "Reinterpret vector contents"]
35036#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"]
35037#[inline(always)]
35038#[target_feature(enable = "sve")]
35039#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
35040pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t {
35041    unsafe { crate::intrinsics::transmute_unchecked(op) }
35042}
// svreinterpret_u32_* family: re-type an SVE vector as `svuint32_t`.
// Pure bit reinterprets — no element values are converted.
// SAFETY (applies to every body below): source and destination are SVE data
// vector types of the same scalable register size, so `transmute_unchecked`
// is a sound bitwise cast.
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Identity reinterpret (u32 -> u32); kept for API completeness of the family.
pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
// svreinterpret_u64_* family: re-type an SVE vector as `svuint64_t`.
// Pure bit reinterprets — no element values are converted.
// SAFETY (applies to every body below): source and destination are SVE data
// vector types of the same scalable register size, so `transmute_unchecked`
// is a sound bitwise cast.
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
#[doc = "Reinterpret vector contents"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Identity reinterpret (u64 -> u64); kept for API completeness of the family.
pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t {
    unsafe { crate::intrinsics::transmute_unchecked(op) }
}
// Predicate (svbool) element reverses. The public type is always `svbool_t`;
// the b16/b32/b64 forms convert through `sve_into` so the argument/result
// match the narrower predicate type of the underlying LLVM intrinsic
// (nxv8i1 / nxv4i1 / nxv2i1 respectively).
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_b8(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")]
        fn _svrev_b8(op: svbool_t) -> svbool_t;
    }
    // SAFETY: `svbool_t` is exactly the predicate type the nxv16i1 intrinsic
    // takes, so no conversion is needed.
    unsafe { _svrev_b8(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_b16(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")]
        fn _svrev_b16(op: svbool8_t) -> svbool8_t;
    }
    // SAFETY: `sve_into` converts to/from the `svbool8_t` form the nxv8i1
    // intrinsic expects.
    unsafe { _svrev_b16(op.sve_into()).sve_into() }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_b32(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")]
        fn _svrev_b32(op: svbool4_t) -> svbool4_t;
    }
    // SAFETY: `sve_into` converts to/from the `svbool4_t` form the nxv4i1
    // intrinsic expects.
    unsafe { _svrev_b32(op.sve_into()).sve_into() }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_b64(op: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")]
        fn _svrev_b64(op: svbool2_t) -> svbool2_t;
    }
    // SAFETY: `sve_into` converts to/from the `svbool2_t` form the nxv2i1
    // intrinsic expects.
    unsafe { _svrev_b64(op.sve_into()).sve_into() }
}
// Data-vector element reverses (REV). Each wrapper's Rust vector type already
// matches the LLVM intrinsic's scalable type exactly, so the call is direct.
// SAFETY (applies to every body below): the declared intrinsic signature
// matches the wrapper's signature, and `target_feature(enable = "sve")`
// gates availability of the instruction.
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")]
        fn _svrev_f32(op: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svrev_f32(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")]
        fn _svrev_f64(op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrev_f64(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_s8(op: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")]
        fn _svrev_s8(op: svint8_t) -> svint8_t;
    }
    unsafe { _svrev_s8(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_s16(op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")]
        fn _svrev_s16(op: svint16_t) -> svint16_t;
    }
    unsafe { _svrev_s16(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_s32(op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")]
        fn _svrev_s32(op: svint32_t) -> svint32_t;
    }
    unsafe { _svrev_s32(op) }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_s64(op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")]
        fn _svrev_s64(op: svint64_t) -> svint64_t;
    }
    unsafe { _svrev_s64(op) }
}
// Unsigned element reverses delegate to the signed variants: REV only moves
// whole elements, so reinterpreting signedness cannot change the result.
// SAFETY (applies to every body below): `as_signed`/`as_unsigned` are
// same-size bit reinterprets between the unsigned and signed vector types.
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_u8(op: svuint8_t) -> svuint8_t {
    unsafe { svrev_s8(op.as_signed()).as_unsigned() }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_u16(op: svuint16_t) -> svuint16_t {
    unsafe { svrev_s16(op.as_signed()).as_unsigned() }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_u32(op: svuint32_t) -> svuint32_t {
    unsafe { svrev_s32(op.as_signed()).as_unsigned() }
}
#[doc = "Reverse all elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(rev))]
pub fn svrev_u64(op: svuint64_t) -> svuint64_t {
    unsafe { svrev_s64(op.as_signed()).as_unsigned() }
}
// REVB (reverse bytes within each element), signed forms. Predication
// variants follow the ACLE naming convention, and the code below shows:
//   _m (merge): inactive lanes are taken from the separate `inactive` vector;
//   _x ("don't care"): implemented here by passing `op` as its own inactive
//       vector, i.e. merging into the input;
//   _z (zeroing): inactive lanes are taken from a zero vector (`svdup_n_*`).
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")]
        fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve_into` converts the predicate to the `svbool8_t` form the
    // nxv8i16 intrinsic expects; the vector types match directly.
    unsafe { _svrevb_s16_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
    svrevb_s16_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
    svrevb_s16_m(svdup_n_s16(0), pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")]
        fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve_into` converts the predicate to the `svbool4_t` form the
    // nxv4i32 intrinsic expects; the vector types match directly.
    unsafe { _svrevb_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svrevb_s32_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svrevb_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")]
        fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve_into` converts the predicate to the `svbool2_t` form the
    // nxv2i64 intrinsic expects; the vector types match directly.
    unsafe { _svrevb_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svrevb_s64_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svrevb_s64_m(svdup_n_s64(0), pg, op)
}
// REVB unsigned forms: the _m variants delegate to the signed implementations
// (byte reversal is signedness-agnostic), and _x/_z build on _m with `op`
// itself or a zero vector as the inactive operand.
// SAFETY (applies to the _m bodies below): `as_signed`/`as_unsigned` are
// same-size bit reinterprets between unsigned and signed vector types.
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
    unsafe { svrevb_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    svrevb_u16_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
    svrevb_u16_m(svdup_n_u16(0), pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svrevb_u32_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svrevb_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svrevb_u64_m(op, pg, op)
}
#[doc = "Reverse bytes within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revb))]
pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svrevb_u64_m(svdup_n_u64(0), pg, op)
}
// REVH (reverse halfwords within each element), signed forms. Only 32- and
// 64-bit element widths exist (an element must hold at least two halfwords).
// _m merges inactive lanes from `inactive`; _x passes `op` as its own
// inactive operand; _z uses a zero vector for inactive lanes.
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")]
        fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve_into` converts the predicate to the `svbool4_t` form the
    // nxv4i32 intrinsic expects; the vector types match directly.
    unsafe { _svrevh_s32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
    svrevh_s32_m(op, pg, op)
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
    svrevh_s32_m(svdup_n_s32(0), pg, op)
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")]
        fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve_into` converts the predicate to the `svbool2_t` form the
    // nxv2i64 intrinsic expects; the vector types match directly.
    unsafe { _svrevh_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    svrevh_s64_m(op, pg, op)
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    svrevh_s64_m(svdup_n_s64(0), pg, op)
}
// REVH unsigned forms: the _m variants delegate to the signed implementations
// (halfword reversal is signedness-agnostic); _x reuses `op` as the inactive
// operand and _z uses a zero vector.
// SAFETY (applies to the _m bodies below): `as_signed`/`as_unsigned` are
// same-size bit reinterprets between unsigned and signed vector types.
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
    unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svrevh_u32_m(op, pg, op)
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
    svrevh_u32_m(svdup_n_u32(0), pg, op)
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse halfwords within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revh))]
pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    svrevh_u64_m(op, pg, op)
}
35650#[doc = "Reverse halfwords within elements"]
35651#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"]
35652#[inline(always)]
35653#[target_feature(enable = "sve")]
35654#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
35655#[cfg_attr(test, assert_instr(revh))]
35656pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
35657    svrevh_u64_m(svdup_n_u64(0), pg, op)
35658}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
    // Merging (_m) form: lanes where `pg` is false take their value from
    // `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")]
        fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
    }
    // `sve_into` converts the all-lanes svbool_t predicate to the
    // 64-bit-element predicate type (svbool2_t) the LLVM intrinsic expects.
    unsafe { _svrevw_s64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrevw_s64_m(op, pg, op)
}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
    // Zeroing (_z): inactive lanes are set to 0.
    svrevw_s64_m(svdup_n_s64(0), pg, op)
}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Word reversal is sign-agnostic: delegate to the signed wrapper via
    // reinterprets.
    unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrevw_u64_m(op, pg, op)
}
#[doc = "Reverse words within elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(revw))]
pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
    // Zeroing (_z): inactive lanes are set to 0.
    svrevw_u64_m(svdup_n_u64(0), pg, op)
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")]
        fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate
    // (svbool4_t) the LLVM intrinsic expects.
    unsafe { _svrinta_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrinta_f32_m(op, pg, op)
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrinta_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")]
        fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrinta_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrinta_f64_m(op, pg, op)
}
#[doc = "Round to nearest, ties away from zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinta))]
pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrinta_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")]
        fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrinti_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrinti_f32_m(op, pg, op)
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrinti_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")]
        fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrinti_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrinti_f64_m(op, pg, op)
}
#[doc = "Round using current rounding mode (inexact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frinti))]
pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrinti_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")]
        fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrintm_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintm_f32_m(op, pg, op)
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintm_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")]
        fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrintm_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintm_f64_m(op, pg, op)
}
#[doc = "Round towards -∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintm))]
pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintm_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")]
        fn _svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrintn_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintn_f32_m(op, pg, op)
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintn_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")]
        fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrintn_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintn_f64_m(op, pg, op)
}
#[doc = "Round to nearest, ties to even"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintn))]
pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintn_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")]
        fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrintp_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintp_f32_m(op, pg, op)
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintp_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")]
        fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrintp_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintp_f64_m(op, pg, op)
}
#[doc = "Round towards +∞"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintp))]
pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintp_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")]
        fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrintx_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintx_f32_m(op, pg, op)
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintx_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")]
        fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrintx_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintx_f64_m(op, pg, op)
}
#[doc = "Round using current rounding mode (exact)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintx))]
pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintx_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Merging (_m) form: inactive lanes take their value from `inactive`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")]
        fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts svbool_t to the 32-bit-element predicate type.
    unsafe { _svrintz_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintz_f32_m(op, pg, op)
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintz_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Merging (_m) form, 64-bit elements (svbool2_t predicate, nxv2f64).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")]
        fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrintz_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x): reuse the merging form with `op` as merge source.
    svrintz_f64_m(op, pg, op)
}
#[doc = "Round towards zero"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frintz))]
pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z): inactive lanes are set to 0.0.
    svrintz_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frsqrte))]
pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t {
    // Unpredicated operation: no governing predicate, all lanes computed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32"
        )]
        fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svrsqrte_f32(op) }
}
#[doc = "Reciprocal square root estimate"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frsqrte))]
pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t {
    // Unpredicated operation: no governing predicate, all lanes computed.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64"
        )]
        fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrsqrte_f64(op) }
}
#[doc = "Reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frsqrts))]
pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Unpredicated Newton-step companion to svrsqrte; no governing predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32"
        )]
        fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svrsqrts_f32(op1, op2) }
}
#[doc = "Reciprocal square root step"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(frsqrts))]
pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Unpredicated Newton-step companion to svrsqrte; no governing predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64"
        )]
        fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svrsqrts_f64(op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    // Direct binding to the merging (`_m`) LLVM SVE intrinsic (FSCALE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")]
        fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    // SAFETY: value-only intrinsic; `pg` is converted via `sve_into` to the
    // 4-lane predicate type the nxv4f32 intrinsic expects.
    unsafe { _svscale_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    // Scalar (`_n_`) form: splat `op2` across a vector, then defer to the
    // vector variant.
    svscale_f32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    // `_x` leaves inactive lanes unspecified, so reusing the merging form is
    // a valid implementation.
    svscale_f32_m(pg, op1, op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    // Scalar (`_n_`) form: splat `op2`, then defer to the vector `_x` variant.
    svscale_f32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
    // `_z` zeroes inactive lanes: select zeros into `op1` where `pg` is
    // false, then apply the merging form.
    svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
    // Scalar (`_n_`) form: splat `op2`, then defer to the vector `_z` variant.
    svscale_f32_z(pg, op1, svdup_n_s32(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    // Direct binding to the merging (`_m`) LLVM SVE intrinsic (FSCALE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")]
        fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    // SAFETY: value-only intrinsic; `pg` is converted via `sve_into` to the
    // 2-lane predicate type the nxv2f64 intrinsic expects.
    unsafe { _svscale_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    // Scalar (`_n_`) form: splat `op2` across a vector, then defer to the
    // vector variant.
    svscale_f64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    // `_x` leaves inactive lanes unspecified, so reusing the merging form is
    // a valid implementation.
    svscale_f64_m(pg, op1, op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    // Scalar (`_n_`) form: splat `op2`, then defer to the vector `_x` variant.
    svscale_f64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
    // `_z` zeroes inactive lanes: select zeros into `op1` where `pg` is
    // false, then apply the merging form.
    svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Adjust exponent"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fscale))]
pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
    // Scalar (`_n_`) form: splat `op2`, then defer to the vector `_z` variant.
    svscale_f64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
    // SAFETY: lane-wise select on matching scalable predicate types; pure
    // value computation with no memory access.
    unsafe { simd_select(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // SAFETY: `pg` is converted to the 4-lane predicate type matching 32-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool4_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // SAFETY: `pg` is converted to the 2-lane predicate type matching 64-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool2_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // SAFETY: byte elements use the full-width predicate directly (no
    // conversion needed); `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool_t, _>(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // SAFETY: `pg` is converted to the 8-lane predicate type matching 16-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool8_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // SAFETY: `pg` is converted to the 4-lane predicate type matching 32-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool4_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // SAFETY: `pg` is converted to the 2-lane predicate type matching 64-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool2_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: byte elements use the full-width predicate directly (no
    // conversion needed); `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool_t, _>(pg, op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: `pg` is converted to the 8-lane predicate type matching 16-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool8_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: `pg` is converted to the 4-lane predicate type matching 32-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool4_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Conditionally select elements"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sel))]
pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: `pg` is converted to the 2-lane predicate type matching 64-bit
    // elements; `simd_select` is a pure lane-wise select.
    unsafe { simd_select::<svbool2_t, _>(pg.sve_into(), op1, op2) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=1);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=2);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Change one vector in a tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub fn svset4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t {
    // Compile-time bounds check on the tuple index.
    static_assert_range!(IMM_INDEX, 0..=3);
    // SAFETY: IMM_INDEX is range-checked above, so the tuple insert is in bounds.
    unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) }
}
#[doc = "Initialize the first-fault register to all-true"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(setffr))]
pub fn svsetffr() {
    // Direct binding to the LLVM SVE intrinsic (SETFFR).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")]
        fn _svsetffr();
    }
    // SAFETY: takes no arguments and writes only the FFR predicate register;
    // SVE availability is guaranteed by `#[target_feature(enable = "sve")]`.
    unsafe { _svsetffr() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM SVE intrinsic (SPLICE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")]
        fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: value-only intrinsic; `pg` is converted via `sve_into` to the
    // 4-lane predicate type the nxv4f32 intrinsic expects.
    unsafe { _svsplice_f32(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM SVE intrinsic (SPLICE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")]
        fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: value-only intrinsic; `pg` is converted via `sve_into` to the
    // 2-lane predicate type the nxv2f64 intrinsic expects.
    unsafe { _svsplice_f64(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM SVE intrinsic (SPLICE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")]
        fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: value-only intrinsic; byte elements use the full-width
    // predicate directly, so no conversion of `pg` is needed.
    unsafe { _svsplice_s8(pg, op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM SVE intrinsic (SPLICE).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")]
        fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: value-only intrinsic; `pg` is converted via `sve_into` to the
    // 8-lane predicate type the nxv8i16 intrinsic expects.
    unsafe { _svsplice_s16(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; `nxv4i32` = scalable vector of 4x i32 lanes.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")]
        fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: pure register-to-register operation, no memory access.
    // `sve_into` converts the predicate to the 4-lane form for i32 elements.
    unsafe { _svsplice_s32(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; `nxv2i64` = scalable vector of 2x i64 lanes.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")]
        fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: pure register-to-register operation, no memory access.
    // `sve_into` converts the predicate to the 2-lane form for i64 elements.
    unsafe { _svsplice_s64(pg.sve_into(), op1, op2) }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Delegates to the signed variant: the operands are reinterpreted
    // (same bit patterns) and the result reinterpreted back to unsigned.
    unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Splice two vectors under predicate control"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(splice))]
pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; merging (_m) form takes `inactive` as the
        // source of lanes where the predicate is false.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")]
        fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: pure register-to-register operation, no memory access.
    // `sve_into` converts the predicate to the 4-lane form for f32 elements.
    unsafe { _svsqrt_f32_m(inactive, pg.sve_into(), op) }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // "Don't care" (_x) form: inactive lanes are unspecified, so it is
    // implemented as the merging form with `op` itself as the inactive source.
    svsqrt_f32_m(op, pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
    // Zeroing (_z) form: inactive lanes become 0.0, implemented as the
    // merging form with an all-zeros inactive vector.
    svsqrt_f32_m(svdup_n_f32(0.0), pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; merging (_m) form takes `inactive` as the
        // source of lanes where the predicate is false.
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")]
        fn _svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: pure register-to-register operation, no memory access.
    // `sve_into` converts the predicate to the 2-lane form for f64 elements.
    unsafe { _svsqrt_f64_m(inactive, pg.sve_into(), op) }
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // "Don't care" (_x) form: inactive lanes are unspecified, so it is
    // implemented as the merging form with `op` itself as the inactive source.
    svsqrt_f64_m(op, pg, op)
}
#[doc = "Square root"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsqrt))]
pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
    // Zeroing (_z) form: inactive lanes become 0.0, implemented as the
    // merging form with an all-zeros inactive vector.
    svsqrt_f64_m(svdup_n_f64(0.0), pg, op)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic. Note the argument order (data, pg, ptr)
        // differs from the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")]
        fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
    }
    // Predicated contiguous store of active f32 lanes starting at `base`;
    // the caller upholds the pointer-validity contract in the Safety docs.
    _svst1_f32(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; argument order (data, pg, ptr) differs from
        // the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")]
        fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
    }
    // Predicated contiguous store of active f64 lanes starting at `base`.
    _svst1_f64(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; argument order (data, pg, ptr) differs from
        // the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")]
        fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8);
    }
    // Byte elements: predicate is already byte-granular, no conversion needed.
    _svst1_s8(data, pg, base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; argument order (data, pg, ptr) differs from
        // the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")]
        fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16);
    }
    // Predicate converted to the 8-lane form for i16 elements.
    _svst1_s16(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; argument order (data, pg, ptr) differs from
        // the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")]
        fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32);
    }
    // Predicate converted to the 4-lane form for i32 elements.
    _svst1_s32(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; argument order (data, pg, ptr) differs from
        // the public API order (pg, base, data).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")]
        fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64);
    }
    // Predicate converted to the 2-lane form for i64 elements.
    _svst1_s64(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) {
    // Delegates to the signed variant; pointer and data are reinterpreted
    // (identical bit patterns are stored).
    svst1_s8(pg, base.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    svst1_s16(pg, base.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    svst1_s32(pg, base.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    svst1_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32index_f32(
    pg: svbool_t,
    base: *mut f32,
    indices: svint32_t,
    data: svfloat32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `sxtw` = indices are sign-extended 32-bit
        // values; `.index` = scaled by the element size (per the ACLE
        // svst1_scatter index-variant contract).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32"
        )]
        fn _svst1_scatter_s32index_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            base: *mut f32,
            indices: svint32_t,
        );
    }
    // Scatter-store active f32 lanes; caller upholds the Safety contract
    // for every active address.
    _svst1_scatter_s32index_f32(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32index_s32(
    pg: svbool_t,
    base: *mut i32,
    indices: svint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `sxtw` = sign-extended 32-bit indices,
        // `.index` = element-size-scaled addressing.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32"
        )]
        fn _svst1_scatter_s32index_s32(
            data: svint32_t,
            pg: svbool4_t,
            base: *mut i32,
            indices: svint32_t,
        );
    }
    // Scatter-store active i32 lanes; caller upholds the Safety contract.
    _svst1_scatter_s32index_s32(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32index_u32(
    pg: svbool_t,
    base: *mut u32,
    indices: svint32_t,
    data: svuint32_t,
) {
    // Delegates to the signed variant via bit-preserving reinterpret casts;
    // indices are already signed and pass through unchanged.
    svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64index_f64(
    pg: svbool_t,
    base: *mut f64,
    indices: svint64_t,
    data: svfloat64_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; 64-bit indices need no extension, and
        // `.index` means element-size-scaled addressing.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64"
        )]
        fn _svst1_scatter_s64index_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            base: *mut f64,
            indices: svint64_t,
        );
    }
    // Scatter-store active f64 lanes; caller upholds the Safety contract.
    _svst1_scatter_s64index_f64(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i64,
    indices: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic; 64-bit indices need no extension, and
        // `.index` means element-size-scaled addressing.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64"
        )]
        fn _svst1_scatter_s64index_s64(
            data: svint64_t,
            pg: svbool2_t,
            base: *mut i64,
            indices: svint64_t,
        );
    }
    // Scatter-store active i64 lanes; caller upholds the Safety contract.
    _svst1_scatter_s64index_s64(data, pg.sve_into(), base, indices)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u64,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Delegates to the signed variant via bit-preserving reinterpret casts.
    svst1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32index_f32(
    pg: svbool_t,
    base: *mut f32,
    indices: svuint32_t,
    data: svfloat32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `uxtw` = indices are zero-extended 32-bit
        // values (unsigned), `.index` = element-size-scaled addressing.
        // The intrinsic itself takes the indices as `svint32_t`.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32"
        )]
        fn _svst1_scatter_u32index_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            base: *mut f32,
            indices: svint32_t,
        );
    }
    // Indices are reinterpreted to the signed vector type expected by the
    // FFI signature; `uxtw` preserves their unsigned interpretation.
    _svst1_scatter_u32index_f32(data, pg.sve_into(), base, indices.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32index_s32(
    pg: svbool_t,
    base: *mut i32,
    indices: svuint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `uxtw` = zero-extended 32-bit indices,
        // `.index` = element-size-scaled addressing.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32"
        )]
        fn _svst1_scatter_u32index_s32(
            data: svint32_t,
            pg: svbool4_t,
            base: *mut i32,
            indices: svint32_t,
        );
    }
    // Indices reinterpreted to the signed vector type of the FFI signature;
    // `uxtw` keeps the unsigned interpretation.
    _svst1_scatter_u32index_s32(data, pg.sve_into(), base, indices.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32index_u32(
    pg: svbool_t,
    base: *mut u32,
    indices: svuint32_t,
    data: svuint32_t,
) {
    // Delegates to the u32-index/s32-data variant (which handles the uxtw
    // index extension); pointer and data are reinterpreted bit-for-bit.
    svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64index_f64(
    pg: svbool_t,
    base: *mut f64,
    indices: svuint64_t,
    data: svfloat64_t,
) {
    // 64-bit indices need no extension, so this just reinterprets them as
    // signed and delegates to the s64-index variant.
    svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i64,
    indices: svuint64_t,
    data: svint64_t,
) {
    // 64-bit indices need no extension; reinterpret them as signed and
    // delegate to the s64-index variant.
    svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u64,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // Delegates to the fully-signed s64-index variant; pointer, indices,
    // and data are all reinterpreted bit-for-bit.
    svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32offset_f32(
    pg: svbool_t,
    base: *mut f32,
    offsets: svint32_t,
    data: svfloat32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `sxtw` = sign-extended 32-bit offsets; no
        // `.index` suffix, so offsets are byte offsets (unscaled), per the
        // ACLE svst1_scatter offset-variant contract.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32"
        )]
        fn _svst1_scatter_s32offset_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            base: *mut f32,
            offsets: svint32_t,
        );
    }
    // Scatter-store active f32 lanes; caller upholds the Safety contract.
    _svst1_scatter_s32offset_f32(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32offset_s32(
    pg: svbool_t,
    base: *mut i32,
    offsets: svint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // Raw LLVM intrinsic: `sxtw` = sign-extended 32-bit offsets; no
        // `.index` suffix, so offsets are unscaled byte offsets.
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32"
        )]
        fn _svst1_scatter_s32offset_s32(
            data: svint32_t,
            pg: svbool4_t,
            base: *mut i32,
            offsets: svint32_t,
        );
    }
    // Scatter-store active i32 lanes; caller upholds the Safety contract.
    _svst1_scatter_s32offset_s32(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_s32offset_u32(
    pg: svbool_t,
    base: *mut u32,
    offsets: svint32_t,
    data: svuint32_t,
) {
    // The store is bit-pattern-identical for u32, so delegate to the signed
    // variant, reinterpreting pointer and data via `as_signed()`.
    svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64offset_f64(
    pg: svbool_t,
    base: *mut f64,
    offsets: svint64_t,
    data: svfloat64_t,
) {
    // Raw binding to the LLVM scatter-store intrinsic for 64-bit lanes; the
    // 64-bit byte offsets need no extension (no sxtw/uxtw in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64"
        )]
        fn _svst1_scatter_s64offset_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            base: *mut f64,
            offsets: svint64_t,
        );
    }
    // LLVM takes `data` first and a 2-lane predicate (`svbool2_t`), hence the
    // argument reorder and the `sve_into()` predicate conversion.
    _svst1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i64,
    offsets: svint64_t,
    data: svint64_t,
) {
    // Raw binding to the LLVM scatter-store intrinsic for 64-bit lanes; the
    // 64-bit byte offsets need no extension (no sxtw/uxtw in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64"
        )]
        fn _svst1_scatter_s64offset_s64(
            data: svint64_t,
            pg: svbool2_t,
            base: *mut i64,
            offsets: svint64_t,
        );
    }
    // LLVM takes `data` first and a 2-lane predicate (`svbool2_t`), hence the
    // argument reorder and the `sve_into()` predicate conversion.
    _svst1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_s64offset_u64(
    pg: svbool_t,
    base: *mut u64,
    offsets: svint64_t,
    data: svuint64_t,
) {
    // The store is bit-pattern-identical for u64, so delegate to the signed
    // variant, reinterpreting pointer and data via `as_signed()`.
    svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32offset_f32(
    pg: svbool_t,
    base: *mut f32,
    offsets: svuint32_t,
    data: svfloat32_t,
) {
    // Raw binding to the LLVM scatter-store intrinsic; `uxtw` in the link
    // name: each 32-bit byte offset is zero-extended before addition.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32"
        )]
        fn _svst1_scatter_u32offset_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            base: *mut f32,
            offsets: svint32_t,
        );
    }
    // LLVM takes `data` first, a 4-lane predicate, and signed offset lanes,
    // so `pg` is narrowed and the unsigned offsets are reinterpreted.
    _svst1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i32,
    offsets: svuint32_t,
    data: svint32_t,
) {
    // Raw binding to the LLVM scatter-store intrinsic; `uxtw` in the link
    // name: each 32-bit byte offset is zero-extended before addition.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32"
        )]
        fn _svst1_scatter_u32offset_s32(
            data: svint32_t,
            pg: svbool4_t,
            base: *mut i32,
            offsets: svint32_t,
        );
    }
    // LLVM takes `data` first, a 4-lane predicate, and signed offset lanes,
    // so `pg` is narrowed and the unsigned offsets are reinterpreted.
    _svst1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32offset_u32(
    pg: svbool_t,
    base: *mut u32,
    offsets: svuint32_t,
    data: svuint32_t,
) {
    // The store is bit-pattern-identical for u32, so delegate to the signed
    // variant, reinterpreting pointer and data via `as_signed()`.
    svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64offset_f64(
    pg: svbool_t,
    base: *mut f64,
    offsets: svuint64_t,
    data: svfloat64_t,
) {
    // 64-bit offsets need no extension, so only the offset vector has to be
    // reinterpreted as signed before delegating.
    svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64offset_s64(
    pg: svbool_t,
    base: *mut i64,
    offsets: svuint64_t,
    data: svint64_t,
) {
    // 64-bit offsets need no extension, so only the offset vector has to be
    // reinterpreted as signed before delegating.
    svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64offset_u64(
    pg: svbool_t,
    base: *mut u64,
    offsets: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned variant: reinterpret pointer, offsets, and data as
    // signed and delegate — the stored bit patterns are unchanged.
    svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u32base_offset_f32(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u64base_offset_f64(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Per-lane base addresses with no displacement: the base+offset form
    // with a zero byte offset.
    svst1_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_index_f32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svfloat32_t,
) {
    // Scale the element index to a byte offset (f32 is 4 bytes, hence << 2).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svint32_t,
) {
    // Scale the element index to a byte offset (i32 is 4 bytes, hence << 2).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svuint32_t,
) {
    // Scale the element index to a byte offset (u32 is 4 bytes, hence << 2).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_index_f64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svfloat64_t,
) {
    // Scale the element index to a byte offset (f64 is 8 bytes, hence << 3).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svint64_t,
) {
    // Scale the element index to a byte offset (i64 is 8 bytes, hence << 3).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svuint64_t,
) {
    // Scale the element index to a byte offset (u64 is 8 bytes, hence << 3).
    // `unchecked_shl` has UB on shift overflow; in-range `index` is part of
    // this function's unsafe contract.
    svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_offset_f32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svfloat32_t,
) {
    // Raw binding: per-lane 32-bit base addresses plus one scalar byte
    // offset ("scalar.offset" in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32"
        )]
        fn _svst1_scatter_u32base_offset_f32(
            data: svfloat32_t,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // LLVM takes `data` first, a 4-lane predicate, and signed base lanes, so
    // `pg` is narrowed via `sve_into()` and `bases` reinterpreted.
    _svst1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svint32_t,
) {
    // Raw binding: per-lane 32-bit base addresses plus one scalar byte
    // offset ("scalar.offset" in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32"
        )]
        fn _svst1_scatter_u32base_offset_s32(
            data: svint32_t,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // LLVM takes `data` first, a 4-lane predicate, and signed base lanes, so
    // `pg` is narrowed via `sve_into()` and `bases` reinterpreted.
    _svst1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_scatter_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svuint32_t,
) {
    // The store is bit-pattern-identical for u32, so delegate to the signed
    // variant with `data` reinterpreted via `as_signed()`.
    svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_offset_f64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svfloat64_t,
) {
    // Raw binding: per-lane 64-bit base addresses plus one scalar byte
    // offset ("scalar.offset" in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64"
        )]
        fn _svst1_scatter_u64base_offset_f64(
            data: svfloat64_t,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // LLVM takes `data` first, a 2-lane predicate, and signed base lanes, so
    // `pg` is narrowed via `sve_into()` and `bases` reinterpreted.
    _svst1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    // Raw binding: per-lane 64-bit base addresses plus one scalar byte
    // offset ("scalar.offset" in the link name).
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64"
        )]
        fn _svst1_scatter_u64base_offset_s64(
            data: svint64_t,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // LLVM takes `data` first, a 2-lane predicate, and signed base lanes, so
    // `pg` is narrowed via `sve_into()` and `bases` reinterpreted.
    _svst1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // The store is bit-pattern-identical for u64, so delegate to the signed
    // variant with `data` reinterpreted via `as_signed()`.
    svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) {
    // Advance `base` by `vnum` whole vectors of f32 elements — svcntw()
    // supplies the runtime per-vector 32-bit element count — then store.
    svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntd() elements
    // (svcntd() = 64-bit lanes per vector) and perform a plain predicated store.
    svst1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntb() elements
    // (svcntb() = 8-bit lanes per vector) and perform a plain predicated store.
    svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcnth() elements
    // (svcnth() = 16-bit lanes per vector) and perform a plain predicated store.
    svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntw() elements
    // (svcntw() = 32-bit lanes per vector) and perform a plain predicated store.
    svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntd() elements
    // (svcntd() = 64-bit lanes per vector) and perform a plain predicated store.
    svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntb() elements
    // (svcntb() = 8-bit lanes per vector) and perform a plain predicated store.
    svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcnth() elements
    // (svcnth() = 16-bit lanes per vector) and perform a plain predicated store.
    svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntw() elements
    // (svcntw() = 32-bit lanes per vector) and perform a plain predicated store.
    svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Non-truncating store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1d))]
pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) {
    // `vnum` counts whole vectors: advance `base` by vnum * svcntd() elements
    // (svcntd() = 64-bit lanes per vector) and perform a plain predicated store.
    svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")]
        fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8);
    }
    // simd_cast narrows each 16-bit lane to 8 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1b_s16(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")]
        fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8);
    }
    // simd_cast narrows each 32-bit lane to 8 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1b_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")]
        fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16);
    }
    // simd_cast narrows each 32-bit lane to 16 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1h_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")]
        fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8);
    }
    // simd_cast narrows each 64-bit lane to 8 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1b_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")]
        fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16);
    }
    // simd_cast narrows each 64-bit lane to 16 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1h_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) {
    unsafe extern "unadjusted" {
        // LLVM predicated contiguous store; argument order is (data, predicate, pointer).
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")]
        fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32);
    }
    // simd_cast narrows each 64-bit lane to 32 bits before the store; sve_into()
    // converts the untyped svbool_t predicate to the form this intrinsic expects.
    _svst1w_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s16 implementation.
    svst1b_s16(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s32 implementation.
    svst1b_s32(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s32 implementation.
    svst1h_s32(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s64 implementation.
    svst1b_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s64 implementation.
    svst1h_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) {
    // Truncation is bit-pattern identical for signed/unsigned, so reinterpret
    // pointer and data as signed and reuse the s64 implementation.
    svst1w_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_s32offset_s32(
    pg: svbool_t,
    base: *mut i8,
    offsets: svint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // `sxtw` variant: each 32-bit offset is sign-extended for addressing.
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8"
        )]
        fn _svst1b_scatter_s32offset_s32(
            data: nxv4i8,
            pg: svbool4_t,
            base: *mut i8,
            offsets: svint32_t,
        );
    }
    // Narrow each 32-bit lane to 8 bits (simd_cast) and scatter each active
    // lane (per `pg`, converted via sve_into()) to base + offsets[i].
    _svst1b_scatter_s32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s32offset_s32(
    pg: svbool_t,
    base: *mut i16,
    offsets: svint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // `sxtw` variant: each 32-bit offset is sign-extended for addressing.
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16"
        )]
        fn _svst1h_scatter_s32offset_s32(
            data: nxv4i16,
            pg: svbool4_t,
            base: *mut i16,
            offsets: svint32_t,
        );
    }
    // Narrow each 32-bit lane to 16 bits (simd_cast) and scatter each active
    // lane (per `pg`, converted via sve_into()) to base + offsets[i].
    _svst1h_scatter_s32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_s32offset_u32(
    pg: svbool_t,
    base: *mut u8,
    offsets: svint32_t,
    data: svuint32_t,
) {
    // Store is signedness-agnostic: reinterpret pointer and data as signed and
    // reuse the s32 implementation (offsets are already signed).
    svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s32offset_u32(
    pg: svbool_t,
    base: *mut u16,
    offsets: svint32_t,
    data: svuint32_t,
) {
    // Store is signedness-agnostic: reinterpret pointer and data as signed and
    // reuse the s32 implementation (offsets are already signed).
    svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i8,
    offsets: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Offsets are full 64-bit values here (no sxtw/uxtw extension variant).
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8"
        )]
        fn _svst1b_scatter_s64offset_s64(
            data: nxv2i8,
            pg: svbool2_t,
            base: *mut i8,
            offsets: svint64_t,
        );
    }
    // Narrow each 64-bit lane to 8 bits (simd_cast) and scatter each active
    // lane (per `pg`, converted via sve_into()) to base + offsets[i].
    _svst1b_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i16,
    offsets: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Offsets are full 64-bit values here (no sxtw/uxtw extension variant).
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16"
        )]
        fn _svst1h_scatter_s64offset_s64(
            data: nxv2i16,
            pg: svbool2_t,
            base: *mut i16,
            offsets: svint64_t,
        );
    }
    // Narrow each 64-bit lane to 16 bits (simd_cast) and scatter each active
    // lane (per `pg`, converted via sve_into()) to base + offsets[i].
    _svst1h_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_s64offset_s64(
    pg: svbool_t,
    base: *mut i32,
    offsets: svint64_t,
    data: svint64_t,
) {
    unsafe extern "unadjusted" {
        // Offsets are full 64-bit values here (no sxtw/uxtw extension variant).
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32"
        )]
        fn _svst1w_scatter_s64offset_s64(
            data: nxv2i32,
            pg: svbool2_t,
            base: *mut i32,
            offsets: svint64_t,
        );
    }
    // Narrow each 64-bit lane to 32 bits (simd_cast) and scatter each active
    // lane (per `pg`, converted via sve_into()) to base + offsets[i].
    _svst1w_scatter_s64offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_s64offset_u64(
    pg: svbool_t,
    base: *mut u8,
    offsets: svint64_t,
    data: svuint64_t,
) {
    // Store is signedness-agnostic: reinterpret pointer and data as signed and
    // reuse the s64 implementation (offsets are already signed).
    svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s64offset_u64(
    pg: svbool_t,
    base: *mut u16,
    offsets: svint64_t,
    data: svuint64_t,
) {
    // Store is signedness-agnostic: reinterpret pointer and data as signed and
    // reuse the s64 implementation (offsets are already signed).
    svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_s64offset_u64(
    pg: svbool_t,
    base: *mut u32,
    offsets: svint64_t,
    data: svuint64_t,
) {
    // Store is signedness-agnostic: reinterpret pointer and data as signed and
    // reuse the s64 implementation (offsets are already signed).
    svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i8,
    offsets: svuint32_t,
    data: svint32_t,
) {
    unsafe extern "unadjusted" {
        // `uxtw` variant: each 32-bit offset is zero-extended for addressing.
        // Per the ACLE `offset` naming these are byte offsets (see Arm docs above).
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8"
        )]
        fn _svst1b_scatter_u32offset_s32(
            data: nxv4i8,
            pg: svbool4_t,
            base: *mut i8,
            offsets: svint32_t,
        );
    }
    // Narrow each 32-bit lane to 8 bits (simd_cast); the unsigned offsets are
    // only reinterpreted as signed (as_signed) to match the FFI signature —
    // the uxtw intrinsic still treats them as unsigned.
    _svst1b_scatter_u32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets.as_signed(),
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32offset_s32(
    pg: svbool_t,
    base: *mut i16,
    offsets: svuint32_t,
    data: svint32_t,
) {
    // Private binding to the LLVM SVE scatter-store intrinsic (`uxtw` =
    // offsets are zero-extended 32-bit values). Argument order differs
    // from the public API: data comes first.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16"
        )]
        fn _svst1h_scatter_u32offset_s32(
            data: nxv4i16,
            pg: svbool4_t,
            base: *mut i16,
            offsets: svint32_t,
        );
    }
    // `simd_cast` narrows each 32-bit lane to 16 bits ("truncate and store").
    _svst1h_scatter_u32offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        offsets.as_signed(),
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32offset_u32(
    pg: svbool_t,
    base: *mut u8,
    offsets: svuint32_t,
    data: svuint32_t,
) {
    // Unsigned form: bitwise-reinterpret pointer and data as signed and
    // delegate; truncation to 8 bits is layout-identical either way.
    svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32offset_u32(
    pg: svbool_t,
    base: *mut u16,
    offsets: svuint32_t,
    data: svuint32_t,
) {
    // Unsigned form: bitwise-reinterpret pointer and data as signed and
    // delegate; truncation to 16 bits is layout-identical either way.
    svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64offset_s64(
    pg: svbool_t,
    base: *mut i8,
    offsets: svuint64_t,
    data: svint64_t,
) {
    // Only the offset vector differs from the s64offset variant: its u64
    // lanes are reinterpreted as i64 (same bits) and the store delegated.
    svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64offset_s64(
    pg: svbool_t,
    base: *mut i16,
    offsets: svuint64_t,
    data: svint64_t,
) {
    // Only the offset vector differs from the s64offset variant: its u64
    // lanes are reinterpreted as i64 (same bits) and the store delegated.
    svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64offset_s64(
    pg: svbool_t,
    base: *mut i32,
    offsets: svuint64_t,
    data: svint64_t,
) {
    // Only the offset vector differs from the s64offset variant: its u64
    // lanes are reinterpreted as i64 (same bits) and the store delegated.
    svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64offset_u64(
    pg: svbool_t,
    base: *mut u8,
    offsets: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned form: reinterpret pointer, offsets and data as their
    // signed counterparts (identical bits) and delegate.
    svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64offset_u64(
    pg: svbool_t,
    base: *mut u16,
    offsets: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned form: reinterpret pointer, offsets and data as their
    // signed counterparts (identical bits) and delegate.
    svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64offset_u64(
    pg: svbool_t,
    base: *mut u32,
    offsets: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned form: reinterpret pointer, offsets and data as their
    // signed counterparts (identical bits) and delegate.
    svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svint32_t,
) {
    // Private binding to the "scalar offset" form of the LLVM scatter-store
    // intrinsic: each lane of `bases` carries an address and the single
    // scalar `offset` is applied to every lane (NOTE(review): presumably a
    // byte offset — confirm against the ACLE spec). Data comes first.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32"
        )]
        fn _svst1b_scatter_u32base_offset_s32(
            data: nxv4i8,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // `simd_cast` narrows each 32-bit lane to 8 bits ("truncate and store").
    _svst1b_scatter_u32base_offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_offset_s32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svint32_t,
) {
    // Private binding to the "scalar offset" form of the LLVM scatter-store
    // intrinsic: per-lane base addresses plus one scalar offset shared by
    // all lanes. Argument order differs from the public API: data first.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32"
        )]
        fn _svst1h_scatter_u32base_offset_s32(
            data: nxv4i16,
            pg: svbool4_t,
            bases: svint32_t,
            offset: i64,
        );
    }
    // `simd_cast` narrows each 32-bit lane to 16 bits ("truncate and store").
    _svst1h_scatter_u32base_offset_s32(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svuint32_t,
) {
    // Unsigned form: reinterpret the data lanes as signed (same bits) and
    // delegate to the signed variant.
    svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_offset_u32(
    pg: svbool_t,
    bases: svuint32_t,
    offset: i64,
    data: svuint32_t,
) {
    // Unsigned form: reinterpret the data lanes as signed (same bits) and
    // delegate to the signed variant.
    svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    // 64-bit element variant: note the nxv2* types (two-element granules)
    // and svbool2_t predicate, versus nxv4*/svbool4_t for 32-bit elements.
    // Data comes first in the LLVM-facing signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64"
        )]
        fn _svst1b_scatter_u64base_offset_s64(
            data: nxv2i8,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 8 bits ("truncate and store").
    _svst1b_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    // 64-bit element variant (nxv2i16 / svbool2_t); data comes first in the
    // LLVM-facing signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64"
        )]
        fn _svst1h_scatter_u64base_offset_s64(
            data: nxv2i16,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 16 bits ("truncate and store").
    _svst1h_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_offset_s64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svint64_t,
) {
    // 64-bit element variant (nxv2i32 / svbool2_t); data comes first in the
    // LLVM-facing signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64"
        )]
        fn _svst1w_scatter_u64base_offset_s64(
            data: nxv2i32,
            pg: svbool2_t,
            bases: svint64_t,
            offset: i64,
        );
    }
    // `simd_cast` narrows each 64-bit lane to 32 bits ("truncate and store").
    _svst1w_scatter_u64base_offset_s64(
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        bases.as_signed(),
        offset,
    )
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Unsigned form: reinterpret the data lanes as signed (same bits) and
    // delegate to the signed variant.
    svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Unsigned form: reinterpret the data lanes as signed (same bits) and
    // delegate to the signed variant.
    svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_offset_u64(
    pg: svbool_t,
    bases: svuint64_t,
    offset: i64,
    data: svuint64_t,
) {
    // Unsigned form: reinterpret the data lanes as signed (same bits) and
    // delegate to the signed variant.
    svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1b_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1h_scatter_u32base_offset_s32(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1b_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1h_scatter_u32base_offset_u32(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1b_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1h_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1w_scatter_u64base_offset_s64(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1b_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Convenience wrapper: the base-plus-offset form with a zero offset.
    svst1h_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
    // Base-register-only form: delegate to the byte-offset variant with offset 0.
    svst1w_scatter_u64base_offset_u64(pg, bases, 0, data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) {
    // `vnum` counts whole vectors; svcnth() (ACLE: number of 16-bit elements
    // per vector) equals the number of i8 values written per vector here, so
    // the element displacement into the i8 buffer is svcnth() * vnum.
    svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) {
    // svcntw() (ACLE: 32-bit elements per vector) = i8 values written per
    // vector, so the displacement is svcntw() * vnum elements.
    svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) {
    // svcntw() (ACLE: 32-bit elements per vector) = i16 values written per
    // vector, so the displacement is svcntw() * vnum elements.
    svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = i8 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = i16 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = i32 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) {
    // svcnth() (ACLE: 16-bit elements per vector) = u8 values written per
    // vector, so the displacement is svcnth() * vnum elements.
    svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) {
    // svcntw() (ACLE: 32-bit elements per vector) = u8 values written per
    // vector, so the displacement is svcntw() * vnum elements.
    svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) {
    // svcntw() (ACLE: 32-bit elements per vector) = u16 values written per
    // vector, so the displacement is svcntw() * vnum elements.
    svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Truncate to 8 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1b))]
pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = u8 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = u16 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) {
    // svcntd() (ACLE: 64-bit elements per vector) = u32 values written per
    // vector, so the displacement is svcntd() * vnum elements.
    svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s32index_s32(
    pg: svbool_t,
    base: *mut i16,
    indices: svint32_t,
    data: svint32_t,
) {
    // LLVM's "sxtw.index" scatter: 32-bit indices are sign-extended and scaled
    // by the element size. The intrinsic takes the payload first, already
    // truncated to 16-bit lanes (nxv4i16), and a 4-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16"
        )]
        fn _svst1h_scatter_s32index_s32(
            data: nxv4i16,
            pg: svbool4_t,
            base: *mut i16,
            indices: svint32_t,
        );
    }
    _svst1h_scatter_s32index_s32(
        // Truncate each 32-bit lane to 16 bits for the store.
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s32index_u32(
    pg: svbool_t,
    base: *mut u16,
    indices: svint32_t,
    data: svuint32_t,
) {
    // Bit-identical to the signed variant: reinterpret pointer and data lanes.
    svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i16,
    indices: svint64_t,
    data: svint64_t,
) {
    // LLVM's plain "index" scatter: full-width 64-bit indices, scaled by the
    // element size. Payload goes first, truncated to 16-bit lanes (nxv2i16),
    // with a 2-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16"
        )]
        fn _svst1h_scatter_s64index_s64(
            data: nxv2i16,
            pg: svbool2_t,
            base: *mut i16,
            indices: svint64_t,
        );
    }
    _svst1h_scatter_s64index_s64(
        // Truncate each 64-bit lane to 16 bits for the store.
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_s64index_s64(
    pg: svbool_t,
    base: *mut i32,
    indices: svint64_t,
    data: svint64_t,
) {
    // LLVM's plain "index" scatter: full-width 64-bit indices, scaled by the
    // element size. Payload goes first, truncated to 32-bit lanes (nxv2i32),
    // with a 2-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32"
        )]
        fn _svst1w_scatter_s64index_s64(
            data: nxv2i32,
            pg: svbool2_t,
            base: *mut i32,
            indices: svint64_t,
        );
    }
    _svst1w_scatter_s64index_s64(
        // Truncate each 64-bit lane to 32 bits for the store.
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        indices,
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u16,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Bit-identical to the signed variant: reinterpret pointer and data lanes.
    svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_s64index_u64(
    pg: svbool_t,
    base: *mut u32,
    indices: svint64_t,
    data: svuint64_t,
) {
    // Bit-identical to the signed variant: reinterpret pointer and data lanes.
    svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32index_s32(
    pg: svbool_t,
    base: *mut i16,
    indices: svuint32_t,
    data: svint32_t,
) {
    // LLVM's "uxtw.index" scatter: 32-bit indices are zero-extended (unsigned)
    // and scaled by the element size. Payload goes first, truncated to 16-bit
    // lanes (nxv4i16), with a 4-lane predicate.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16"
        )]
        fn _svst1h_scatter_u32index_s32(
            data: nxv4i16,
            pg: svbool4_t,
            base: *mut i16,
            indices: svint32_t,
        );
    }
    _svst1h_scatter_u32index_s32(
        // Truncate each 32-bit lane to 16 bits for the store.
        crate::intrinsics::simd::simd_cast(data),
        pg.sve_into(),
        base,
        // FFI signature uses the signed vector type; reinterpret the bits.
        indices.as_signed(),
    )
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32index_u32(
    pg: svbool_t,
    base: *mut u16,
    indices: svuint32_t,
    data: svuint32_t,
) {
    // Bit-identical to the signed-data variant; indices stay unsigned (uxtw).
    svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i16,
    indices: svuint64_t,
    data: svint64_t,
) {
    // 64-bit indices need no extension, so the signed-index implementation is
    // reused; as_signed() only reinterprets the index bits.
    svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64index_s64(
    pg: svbool_t,
    base: *mut i32,
    indices: svuint64_t,
    data: svint64_t,
) {
    // 64-bit indices need no extension, so the signed-index implementation is
    // reused; as_signed() only reinterprets the index bits.
    svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u16,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned form: reinterpret pointer, indices, and data to reuse
    // the signed 64-bit-index implementation (bit-identical behavior).
    svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64index_u64(
    pg: svbool_t,
    base: *mut u32,
    indices: svuint64_t,
    data: svuint64_t,
) {
    // Fully unsigned form: reinterpret pointer, indices, and data to reuse
    // the signed 64-bit-index implementation (bit-identical behavior).
    svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_index_s32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svint32_t,
) {
    // Convert the element index to a byte offset: << 1 for 2-byte (16-bit)
    // stores. A shift amount of 1 is always < 64, so unchecked_shl is sound.
    svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u32base_index_u32(
    pg: svbool_t,
    bases: svuint32_t,
    index: i64,
    data: svuint32_t,
) {
    // Convert the element index to a byte offset: << 1 for 2-byte (16-bit)
    // stores. A shift amount of 1 is always < 64, so unchecked_shl is sound.
    svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svint64_t,
) {
    // Convert the element index to a byte offset: << 1 for 2-byte (16-bit)
    // stores. A shift amount of 1 is always < 64, so unchecked_shl is sound.
    svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_index_s64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svint64_t,
) {
    // Convert the element index to a byte offset: << 2 for 4-byte (32-bit)
    // stores. A shift amount of 2 is always < 64, so unchecked_shl is sound.
    svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data)
}
#[doc = "Truncate to 16 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1h))]
pub unsafe fn svst1h_scatter_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svuint64_t,
) {
    // Convert the element index to a byte offset: << 1 for 2-byte (16-bit)
    // stores. A shift amount of 1 is always < 64, so unchecked_shl is sound.
    svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data)
}
#[doc = "Truncate to 32 bits and store"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before  using it."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st1w))]
pub unsafe fn svst1w_scatter_u64base_index_u64(
    pg: svbool_t,
    bases: svuint64_t,
    index: i64,
    data: svuint64_t,
) {
    // Convert the element index to a byte offset: << 2 for 4-byte (32-bit)
    // stores. A shift amount of 2 is always < 64, so unchecked_shl is sound.
    svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")]
        fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
    }
    // The intrinsic takes the tuple's two vectors as separate arguments, and a
    // predicate converted via `sve_into` to the `svbool4_t` type it expects.
    _svst2_f32(
        svget2_f32::<0>(data),
        svget2_f32::<1>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")]
        fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
    }
    // Tuple halves are passed separately; the predicate is converted to the
    // `svbool2_t` form required by the intrinsic's signature.
    _svst2_f64(
        svget2_f64::<0>(data),
        svget2_f64::<1>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2b))]
pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")]
        fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8);
    }
    // For 8-bit elements the intrinsic takes `svbool_t` directly, so `pg`
    // needs no conversion (unlike the wider-element variants).
    _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2h))]
pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")]
        fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16);
    }
    // Tuple halves are passed separately; the predicate is converted to the
    // `svbool8_t` form required by the intrinsic's signature.
    _svst2_s16(
        svget2_s16::<0>(data),
        svget2_s16::<1>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")]
        fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32);
    }
    // Tuple halves are passed separately; the predicate is converted to the
    // `svbool4_t` form required by the intrinsic's signature.
    _svst2_s32(
        svget2_s32::<0>(data),
        svget2_s32::<1>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) {
    // Direct binding of the LLVM SVE structured-store intrinsic for i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")]
        fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64);
    }
    // Tuple halves are passed separately; the predicate is converted to the
    // `svbool2_t` form required by the intrinsic's signature.
    _svst2_s64(
        svget2_s64::<0>(data),
        svget2_s64::<1>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2b))]
pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) {
    // `as_signed` reinterprets pointer and data as their signed counterparts
    // so the signed implementation (same bit patterns) can be reused.
    svst2_s8(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2h))]
pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) {
    // Reinterpret as signed and delegate to the i16 implementation.
    svst2_s16(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) {
    // Reinterpret as signed and delegate to the i32 implementation.
    svst2_s32(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) {
    // Reinterpret as signed and delegate to the i64 implementation.
    svst2_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) {
    // `vnum` is measured in whole vectors: advance `base` by
    // vnum * svcntw() (32-bit elements per vector) and delegate.
    svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) {
    // Advance by vnum whole vectors (svcntd() = 64-bit elements per vector).
    svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2b))]
pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) {
    // Advance by vnum whole vectors (svcntb() = 8-bit elements per vector).
    svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2h))]
pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) {
    // Advance by vnum whole vectors (svcnth() = 16-bit elements per vector).
    svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) {
    // Advance by vnum whole vectors (svcntw() = 32-bit elements per vector).
    svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) {
    // Advance by vnum whole vectors (svcntd() = 64-bit elements per vector).
    svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2b))]
pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) {
    // Advance by vnum whole vectors and delegate to the unsigned wrapper.
    svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2h))]
pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) {
    // Advance by vnum whole vectors and delegate to the unsigned wrapper.
    svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2w))]
pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) {
    // Advance by vnum whole vectors and delegate to the unsigned wrapper.
    svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Store two vectors into two-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st2d))]
pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) {
    // Advance by vnum whole vectors and delegate to the unsigned wrapper.
    svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3w))]
pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")]
        fn _svst3_f32(
            data0: svfloat32_t,
            data1: svfloat32_t,
            data2: svfloat32_t,
            pg: svbool4_t,
            ptr: *mut f32,
        );
    }
    // The three tuple members are passed separately; the predicate is
    // converted to the `svbool4_t` form the intrinsic's signature requires.
    _svst3_f32(
        svget3_f32::<0>(data),
        svget3_f32::<1>(data),
        svget3_f32::<2>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3d))]
pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")]
        fn _svst3_f64(
            data0: svfloat64_t,
            data1: svfloat64_t,
            data2: svfloat64_t,
            pg: svbool2_t,
            ptr: *mut f64,
        );
    }
    // The three tuple members are passed separately; the predicate is
    // converted to the `svbool2_t` form the intrinsic's signature requires.
    _svst3_f64(
        svget3_f64::<0>(data),
        svget3_f64::<1>(data),
        svget3_f64::<2>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3b))]
pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")]
        fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8);
    }
    // For 8-bit elements the intrinsic takes `svbool_t` directly, so `pg`
    // needs no conversion (unlike the wider-element variants).
    _svst3_s8(
        svget3_s8::<0>(data),
        svget3_s8::<1>(data),
        svget3_s8::<2>(data),
        pg,
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3h))]
pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")]
        fn _svst3_s16(
            data0: svint16_t,
            data1: svint16_t,
            data2: svint16_t,
            pg: svbool8_t,
            ptr: *mut i16,
        );
    }
    // The three tuple members are passed separately; the predicate is
    // converted to the `svbool8_t` form the intrinsic's signature requires.
    _svst3_s16(
        svget3_s16::<0>(data),
        svget3_s16::<1>(data),
        svget3_s16::<2>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3w))]
pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")]
        fn _svst3_s32(
            data0: svint32_t,
            data1: svint32_t,
            data2: svint32_t,
            pg: svbool4_t,
            ptr: *mut i32,
        );
    }
    // The three tuple members are passed separately; the predicate is
    // converted to the `svbool4_t` form the intrinsic's signature requires.
    _svst3_s32(
        svget3_s32::<0>(data),
        svget3_s32::<1>(data),
        svget3_s32::<2>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3d))]
pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) {
    // Direct binding of the LLVM SVE three-way structured-store intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2i64")]
        fn _svst3_s64(
            data0: svint64_t,
            data1: svint64_t,
            data2: svint64_t,
            pg: svbool2_t,
            ptr: *mut i64,
        );
    }
    // The three tuple members are passed separately; the predicate is
    // converted to the `svbool2_t` form the intrinsic's signature requires.
    _svst3_s64(
        svget3_s64::<0>(data),
        svget3_s64::<1>(data),
        svget3_s64::<2>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3b))]
pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) {
    // `as_signed` reinterprets pointer and data as their signed counterparts
    // so the signed implementation (same bit patterns) can be reused.
    svst3_s8(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3h))]
pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) {
    // Reinterpret as signed and delegate to the i16 implementation.
    svst3_s16(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3w))]
pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) {
    // Reinterpret as signed and delegate to the i32 implementation.
    svst3_s32(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3d))]
pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) {
    // Reinterpret as signed and delegate to the i64 implementation.
    svst3_s64(pg, base.as_signed(), data.as_signed())
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3w))]
pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) {
    // `vnum` is measured in whole vectors: advance `base` by
    // vnum * svcntw() (32-bit elements per vector) and delegate.
    svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3d))]
pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) {
    // Advance by vnum whole vectors (svcntd() = 64-bit elements per vector).
    svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3b))]
pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) {
    // Advance by vnum whole vectors (svcntb() = 8-bit elements per vector).
    svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
}
#[doc = "Store three vectors into three-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st3h))]
pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) {
    // Advance by vnum whole vectors (svcnth() = 16-bit elements per vector).
    svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
}
40164#[doc = "Store three vectors into three-element tuples"]
40165#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"]
40166#[doc = "## Safety"]
40167#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40168#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40169#[inline(always)]
40170#[target_feature(enable = "sve")]
40171#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40172#[cfg_attr(test, assert_instr(st3w))]
40173pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) {
40174    svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40175}
40176#[doc = "Store three vectors into three-element tuples"]
40177#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"]
40178#[doc = "## Safety"]
40179#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40180#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40181#[inline(always)]
40182#[target_feature(enable = "sve")]
40183#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40184#[cfg_attr(test, assert_instr(st3d))]
40185pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) {
40186    svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40187}
40188#[doc = "Store three vectors into three-element tuples"]
40189#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"]
40190#[doc = "## Safety"]
40191#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40192#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40193#[inline(always)]
40194#[target_feature(enable = "sve")]
40195#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40196#[cfg_attr(test, assert_instr(st3b))]
40197pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) {
40198    svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
40199}
40200#[doc = "Store three vectors into three-element tuples"]
40201#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"]
40202#[doc = "## Safety"]
40203#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40204#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40205#[inline(always)]
40206#[target_feature(enable = "sve")]
40207#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40208#[cfg_attr(test, assert_instr(st3h))]
40209pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) {
40210    svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
40211}
40212#[doc = "Store three vectors into three-element tuples"]
40213#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"]
40214#[doc = "## Safety"]
40215#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40216#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40217#[inline(always)]
40218#[target_feature(enable = "sve")]
40219#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40220#[cfg_attr(test, assert_instr(st3w))]
40221pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) {
40222    svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40223}
40224#[doc = "Store three vectors into three-element tuples"]
40225#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"]
40226#[doc = "## Safety"]
40227#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40228#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40229#[inline(always)]
40230#[target_feature(enable = "sve")]
40231#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40232#[cfg_attr(test, assert_instr(st3d))]
40233pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) {
40234    svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40235}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4w))]
pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")]
        fn _svst4_f32(
            data0: svfloat32_t,
            data1: svfloat32_t,
            data2: svfloat32_t,
            data3: svfloat32_t,
            pg: svbool4_t,
            ptr: *mut f32,
        );
    }
    // Split the x4 tuple into its four constituent vectors; the predicate is
    // converted (sve_into) to the 4-lane svbool4_t the intrinsic expects.
    _svst4_f32(
        svget4_f32::<0>(data),
        svget4_f32::<1>(data),
        svget4_f32::<2>(data),
        svget4_f32::<3>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4d))]
pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")]
        fn _svst4_f64(
            data0: svfloat64_t,
            data1: svfloat64_t,
            data2: svfloat64_t,
            data3: svfloat64_t,
            pg: svbool2_t,
            ptr: *mut f64,
        );
    }
    // Split the x4 tuple into its four constituent vectors; the predicate is
    // converted (sve_into) to the 2-lane svbool2_t the intrinsic expects.
    _svst4_f64(
        svget4_f64::<0>(data),
        svget4_f64::<1>(data),
        svget4_f64::<2>(data),
        svget4_f64::<3>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4b))]
pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 16 x i8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")]
        fn _svst4_s8(
            data0: svint8_t,
            data1: svint8_t,
            data2: svint8_t,
            data3: svint8_t,
            pg: svbool_t,
            ptr: *mut i8,
        );
    }
    // Byte elements use the full-width svbool_t predicate directly, so no
    // predicate conversion is needed here (unlike the wider-element variants).
    _svst4_s8(
        svget4_s8::<0>(data),
        svget4_s8::<1>(data),
        svget4_s8::<2>(data),
        svget4_s8::<3>(data),
        pg,
        base,
    )
}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4h))]
pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 8 x i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")]
        fn _svst4_s16(
            data0: svint16_t,
            data1: svint16_t,
            data2: svint16_t,
            data3: svint16_t,
            pg: svbool8_t,
            ptr: *mut i16,
        );
    }
    // Split the x4 tuple into its four constituent vectors; the predicate is
    // converted (sve_into) to the 8-lane svbool8_t the intrinsic expects.
    _svst4_s16(
        svget4_s16::<0>(data),
        svget4_s16::<1>(data),
        svget4_s16::<2>(data),
        svget4_s16::<3>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4w))]
pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 4 x i32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")]
        fn _svst4_s32(
            data0: svint32_t,
            data1: svint32_t,
            data2: svint32_t,
            data3: svint32_t,
            pg: svbool4_t,
            ptr: *mut i32,
        );
    }
    // Split the x4 tuple into its four constituent vectors; the predicate is
    // converted (sve_into) to the 4-lane svbool4_t the intrinsic expects.
    _svst4_s32(
        svget4_s32::<0>(data),
        svget4_s32::<1>(data),
        svget4_s32::<2>(data),
        svget4_s32::<3>(data),
        pg.sve_into(),
        base,
    )
}
#[doc = "Store four vectors into four-element tuples"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(st4d))]
pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) {
    // Declaration of the underlying LLVM SVE ST4 intrinsic for 2 x i64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")]
        fn _svst4_s64(
            data0: svint64_t,
            data1: svint64_t,
            data2: svint64_t,
            data3: svint64_t,
            pg: svbool2_t,
            ptr: *mut i64,
        );
    }
    // Split the x4 tuple into its four constituent vectors; the predicate is
    // converted (sve_into) to the 2-lane svbool2_t the intrinsic expects.
    _svst4_s64(
        svget4_s64::<0>(data),
        svget4_s64::<1>(data),
        svget4_s64::<2>(data),
        svget4_s64::<3>(data),
        pg.sve_into(),
        base,
    )
}
40416#[doc = "Store four vectors into four-element tuples"]
40417#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"]
40418#[doc = "## Safety"]
40419#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40420#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40421#[inline(always)]
40422#[target_feature(enable = "sve")]
40423#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40424#[cfg_attr(test, assert_instr(st4b))]
40425pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) {
40426    svst4_s8(pg, base.as_signed(), data.as_signed())
40427}
40428#[doc = "Store four vectors into four-element tuples"]
40429#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"]
40430#[doc = "## Safety"]
40431#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40432#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40433#[inline(always)]
40434#[target_feature(enable = "sve")]
40435#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40436#[cfg_attr(test, assert_instr(st4h))]
40437pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) {
40438    svst4_s16(pg, base.as_signed(), data.as_signed())
40439}
40440#[doc = "Store four vectors into four-element tuples"]
40441#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"]
40442#[doc = "## Safety"]
40443#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40444#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40445#[inline(always)]
40446#[target_feature(enable = "sve")]
40447#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40448#[cfg_attr(test, assert_instr(st4w))]
40449pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) {
40450    svst4_s32(pg, base.as_signed(), data.as_signed())
40451}
40452#[doc = "Store four vectors into four-element tuples"]
40453#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"]
40454#[doc = "## Safety"]
40455#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40456#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40457#[inline(always)]
40458#[target_feature(enable = "sve")]
40459#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40460#[cfg_attr(test, assert_instr(st4d))]
40461pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) {
40462    svst4_s64(pg, base.as_signed(), data.as_signed())
40463}
40464#[doc = "Store four vectors into four-element tuples"]
40465#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"]
40466#[doc = "## Safety"]
40467#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40468#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40469#[inline(always)]
40470#[target_feature(enable = "sve")]
40471#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40472#[cfg_attr(test, assert_instr(st4w))]
40473pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) {
40474    svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40475}
40476#[doc = "Store four vectors into four-element tuples"]
40477#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"]
40478#[doc = "## Safety"]
40479#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40480#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40481#[inline(always)]
40482#[target_feature(enable = "sve")]
40483#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40484#[cfg_attr(test, assert_instr(st4d))]
40485pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) {
40486    svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40487}
40488#[doc = "Store four vectors into four-element tuples"]
40489#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"]
40490#[doc = "## Safety"]
40491#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40492#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40493#[inline(always)]
40494#[target_feature(enable = "sve")]
40495#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40496#[cfg_attr(test, assert_instr(st4b))]
40497pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) {
40498    svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
40499}
40500#[doc = "Store four vectors into four-element tuples"]
40501#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"]
40502#[doc = "## Safety"]
40503#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40504#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40505#[inline(always)]
40506#[target_feature(enable = "sve")]
40507#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40508#[cfg_attr(test, assert_instr(st4h))]
40509pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) {
40510    svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
40511}
40512#[doc = "Store four vectors into four-element tuples"]
40513#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"]
40514#[doc = "## Safety"]
40515#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40516#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40517#[inline(always)]
40518#[target_feature(enable = "sve")]
40519#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40520#[cfg_attr(test, assert_instr(st4w))]
40521pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) {
40522    svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40523}
40524#[doc = "Store four vectors into four-element tuples"]
40525#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"]
40526#[doc = "## Safety"]
40527#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40528#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40529#[inline(always)]
40530#[target_feature(enable = "sve")]
40531#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40532#[cfg_attr(test, assert_instr(st4d))]
40533pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) {
40534    svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40535}
40536#[doc = "Store four vectors into four-element tuples"]
40537#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"]
40538#[doc = "## Safety"]
40539#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40540#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40541#[inline(always)]
40542#[target_feature(enable = "sve")]
40543#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40544#[cfg_attr(test, assert_instr(st4b))]
40545pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) {
40546    svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
40547}
40548#[doc = "Store four vectors into four-element tuples"]
40549#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"]
40550#[doc = "## Safety"]
40551#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40552#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40553#[inline(always)]
40554#[target_feature(enable = "sve")]
40555#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40556#[cfg_attr(test, assert_instr(st4h))]
40557pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) {
40558    svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
40559}
40560#[doc = "Store four vectors into four-element tuples"]
40561#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"]
40562#[doc = "## Safety"]
40563#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40564#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40565#[inline(always)]
40566#[target_feature(enable = "sve")]
40567#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40568#[cfg_attr(test, assert_instr(st4w))]
40569pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) {
40570    svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40571}
40572#[doc = "Store four vectors into four-element tuples"]
40573#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"]
40574#[doc = "## Safety"]
40575#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
40576#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40577#[inline(always)]
40578#[target_feature(enable = "sve")]
40579#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40580#[cfg_attr(test, assert_instr(st4d))]
40581pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) {
40582    svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40583}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
    // Declaration of the underlying LLVM SVE STNT1 intrinsic for 4 x f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")]
        fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
    }
    // The predicate is converted (sve_into) to the 4-lane svbool4_t the
    // intrinsic expects.
    _svstnt1_f32(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
    // Declaration of the underlying LLVM SVE STNT1 intrinsic for 2 x f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")]
        fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
    }
    // The predicate is converted (sve_into) to the 2-lane svbool2_t the
    // intrinsic expects.
    _svstnt1_f64(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1b))]
pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) {
    // Declaration of the underlying LLVM SVE STNT1 intrinsic for 16 x i8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")]
        fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8);
    }
    // Byte elements use the full-width svbool_t predicate directly, so no
    // predicate conversion is needed here (unlike the wider-element variants).
    _svstnt1_s8(data, pg, base)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1h))]
pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) {
    // Declaration of the underlying LLVM SVE STNT1 intrinsic for 8 x i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv8i16")]
        fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16);
    }
    // The predicate is converted (sve_into) to the 8-lane svbool8_t the
    // intrinsic expects.
    _svstnt1_s16(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1w))]
pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) {
    // Binding to the LLVM SVE intrinsic; operand order is (data, predicate,
    // pointer), unlike the public signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")]
        fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32);
    }
    // `sve_into` converts the full-width predicate to the 4-lane (32-bit
    // element) predicate type the nxv4i32 intrinsic expects.
    _svstnt1_s32(data, pg.sve_into(), base)
}
#[doc = "Non-truncating store, non-temporal"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"]
#[doc = "## Safety"]
#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(stnt1d))]
pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) {
    // Binding to the LLVM SVE intrinsic; operand order is (data, predicate,
    // pointer), unlike the public signature.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")]
        fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64);
    }
    // `sve_into` converts the full-width predicate to the 2-lane (64-bit
    // element) predicate type the nxv2i64 intrinsic expects.
    _svstnt1_s64(data, pg.sve_into(), base)
}
40686#[doc = "Non-truncating store, non-temporal"]
40687#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"]
40688#[doc = "## Safety"]
40689#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40690#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40691#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40692#[inline(always)]
40693#[target_feature(enable = "sve")]
40694#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40695#[cfg_attr(test, assert_instr(stnt1b))]
40696pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) {
40697    svstnt1_s8(pg, base.as_signed(), data.as_signed())
40698}
40699#[doc = "Non-truncating store, non-temporal"]
40700#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"]
40701#[doc = "## Safety"]
40702#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40703#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40704#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40705#[inline(always)]
40706#[target_feature(enable = "sve")]
40707#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40708#[cfg_attr(test, assert_instr(stnt1h))]
40709pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) {
40710    svstnt1_s16(pg, base.as_signed(), data.as_signed())
40711}
40712#[doc = "Non-truncating store, non-temporal"]
40713#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"]
40714#[doc = "## Safety"]
40715#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40716#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40717#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40718#[inline(always)]
40719#[target_feature(enable = "sve")]
40720#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40721#[cfg_attr(test, assert_instr(stnt1w))]
40722pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) {
40723    svstnt1_s32(pg, base.as_signed(), data.as_signed())
40724}
40725#[doc = "Non-truncating store, non-temporal"]
40726#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"]
40727#[doc = "## Safety"]
40728#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40729#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40730#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40731#[inline(always)]
40732#[target_feature(enable = "sve")]
40733#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40734#[cfg_attr(test, assert_instr(stnt1d))]
40735pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) {
40736    svstnt1_s64(pg, base.as_signed(), data.as_signed())
40737}
40738#[doc = "Non-truncating store, non-temporal"]
40739#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"]
40740#[doc = "## Safety"]
40741#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40742#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40743#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40744#[inline(always)]
40745#[target_feature(enable = "sve")]
40746#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40747#[cfg_attr(test, assert_instr(stnt1w))]
40748pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) {
40749    svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40750}
40751#[doc = "Non-truncating store, non-temporal"]
40752#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"]
40753#[doc = "## Safety"]
40754#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40755#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40756#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40757#[inline(always)]
40758#[target_feature(enable = "sve")]
40759#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40760#[cfg_attr(test, assert_instr(stnt1d))]
40761pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) {
40762    svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40763}
40764#[doc = "Non-truncating store, non-temporal"]
40765#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"]
40766#[doc = "## Safety"]
40767#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40768#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40769#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40770#[inline(always)]
40771#[target_feature(enable = "sve")]
40772#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40773#[cfg_attr(test, assert_instr(stnt1b))]
40774pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) {
40775    svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
40776}
40777#[doc = "Non-truncating store, non-temporal"]
40778#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"]
40779#[doc = "## Safety"]
40780#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40781#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40782#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40783#[inline(always)]
40784#[target_feature(enable = "sve")]
40785#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40786#[cfg_attr(test, assert_instr(stnt1h))]
40787pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) {
40788    svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
40789}
40790#[doc = "Non-truncating store, non-temporal"]
40791#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"]
40792#[doc = "## Safety"]
40793#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40794#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40795#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40796#[inline(always)]
40797#[target_feature(enable = "sve")]
40798#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40799#[cfg_attr(test, assert_instr(stnt1w))]
40800pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) {
40801    svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40802}
40803#[doc = "Non-truncating store, non-temporal"]
40804#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"]
40805#[doc = "## Safety"]
40806#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40807#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40808#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40809#[inline(always)]
40810#[target_feature(enable = "sve")]
40811#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40812#[cfg_attr(test, assert_instr(stnt1d))]
40813pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) {
40814    svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40815}
40816#[doc = "Non-truncating store, non-temporal"]
40817#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"]
40818#[doc = "## Safety"]
40819#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40820#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40821#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40822#[inline(always)]
40823#[target_feature(enable = "sve")]
40824#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40825#[cfg_attr(test, assert_instr(stnt1b))]
40826pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) {
40827    svstnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
40828}
40829#[doc = "Non-truncating store, non-temporal"]
40830#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"]
40831#[doc = "## Safety"]
40832#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40833#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40834#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40835#[inline(always)]
40836#[target_feature(enable = "sve")]
40837#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40838#[cfg_attr(test, assert_instr(stnt1h))]
40839pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) {
40840    svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
40841}
40842#[doc = "Non-truncating store, non-temporal"]
40843#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"]
40844#[doc = "## Safety"]
40845#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40846#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40847#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40848#[inline(always)]
40849#[target_feature(enable = "sve")]
40850#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40851#[cfg_attr(test, assert_instr(stnt1w))]
40852pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) {
40853    svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
40854}
40855#[doc = "Non-truncating store, non-temporal"]
40856#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"]
40857#[doc = "## Safety"]
40858#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
40859#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
40860#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
40861#[inline(always)]
40862#[target_feature(enable = "sve")]
40863#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40864#[cfg_attr(test, assert_instr(stnt1d))]
40865pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) {
40866    svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
40867}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Binding to the merging (_m) LLVM SVE subtract intrinsic for 4 x f32.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")]
        fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // `sve_into` converts the full-width predicate to the 4-lane predicate
    // type the nxv4f32 intrinsic expects.
    unsafe { _svsub_f32_m(pg.sve_into(), op1, op2) }
}
40881#[doc = "Subtract"]
40882#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"]
40883#[inline(always)]
40884#[target_feature(enable = "sve")]
40885#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40886#[cfg_attr(test, assert_instr(fsub))]
40887pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
40888    svsub_f32_m(pg, op1, svdup_n_f32(op2))
40889}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // "_x" ("don't care") form: inactive lanes may hold any value, so
    // delegating to the merging form is a valid implementation choice.
    svsub_f32_m(pg, op1, op2)
}
40899#[doc = "Subtract"]
40900#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"]
40901#[inline(always)]
40902#[target_feature(enable = "sve")]
40903#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40904#[cfg_attr(test, assert_instr(fsub))]
40905pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
40906    svsub_f32_x(pg, op1, svdup_n_f32(op2))
40907}
40908#[doc = "Subtract"]
40909#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"]
40910#[inline(always)]
40911#[target_feature(enable = "sve")]
40912#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40913#[cfg_attr(test, assert_instr(fsub))]
40914pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
40915    svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
40916}
40917#[doc = "Subtract"]
40918#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"]
40919#[inline(always)]
40920#[target_feature(enable = "sve")]
40921#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40922#[cfg_attr(test, assert_instr(fsub))]
40923pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
40924    svsub_f32_z(pg, op1, svdup_n_f32(op2))
40925}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Binding to the merging (_m) LLVM SVE subtract intrinsic for 2 x f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")]
        fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // `sve_into` converts the full-width predicate to the 2-lane predicate
    // type the nxv2f64 intrinsic expects.
    unsafe { _svsub_f64_m(pg.sve_into(), op1, op2) }
}
40939#[doc = "Subtract"]
40940#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"]
40941#[inline(always)]
40942#[target_feature(enable = "sve")]
40943#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40944#[cfg_attr(test, assert_instr(fsub))]
40945pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
40946    svsub_f64_m(pg, op1, svdup_n_f64(op2))
40947}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsub))]
pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // "_x" ("don't care") form: inactive lanes may hold any value, so
    // delegating to the merging form is a valid implementation choice.
    svsub_f64_m(pg, op1, op2)
}
40957#[doc = "Subtract"]
40958#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"]
40959#[inline(always)]
40960#[target_feature(enable = "sve")]
40961#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40962#[cfg_attr(test, assert_instr(fsub))]
40963pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
40964    svsub_f64_x(pg, op1, svdup_n_f64(op2))
40965}
40966#[doc = "Subtract"]
40967#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"]
40968#[inline(always)]
40969#[target_feature(enable = "sve")]
40970#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40971#[cfg_attr(test, assert_instr(fsub))]
40972pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
40973    svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
40974}
40975#[doc = "Subtract"]
40976#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"]
40977#[inline(always)]
40978#[target_feature(enable = "sve")]
40979#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
40980#[cfg_attr(test, assert_instr(fsub))]
40981pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
40982    svsub_f64_z(pg, op1, svdup_n_f64(op2))
40983}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Binding to the merging (_m) LLVM SVE subtract intrinsic for 16 x i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")]
        fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // 8-bit elements use the full-width predicate directly; no `sve_into`
    // conversion is required here, unlike the wider-element variants.
    unsafe { _svsub_s8_m(pg, op1, op2) }
}
40997#[doc = "Subtract"]
40998#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"]
40999#[inline(always)]
41000#[target_feature(enable = "sve")]
41001#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41002#[cfg_attr(test, assert_instr(sub))]
41003pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
41004    svsub_s8_m(pg, op1, svdup_n_s8(op2))
41005}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    // "_x" ("don't care") form: inactive lanes may hold any value, so
    // delegating to the merging form is a valid implementation choice.
    svsub_s8_m(pg, op1, op2)
}
41015#[doc = "Subtract"]
41016#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"]
41017#[inline(always)]
41018#[target_feature(enable = "sve")]
41019#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41020#[cfg_attr(test, assert_instr(sub))]
41021pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
41022    svsub_s8_x(pg, op1, svdup_n_s8(op2))
41023}
41024#[doc = "Subtract"]
41025#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"]
41026#[inline(always)]
41027#[target_feature(enable = "sve")]
41028#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41029#[cfg_attr(test, assert_instr(sub))]
41030pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
41031    svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
41032}
41033#[doc = "Subtract"]
41034#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"]
41035#[inline(always)]
41036#[target_feature(enable = "sve")]
41037#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41038#[cfg_attr(test, assert_instr(sub))]
41039pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
41040    svsub_s8_z(pg, op1, svdup_n_s8(op2))
41041}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Binding to the merging (_m) LLVM SVE subtract intrinsic for 8 x i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")]
        fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // `sve_into` converts the full-width predicate to the 8-lane predicate
    // type the nxv8i16 intrinsic expects.
    unsafe { _svsub_s16_m(pg.sve_into(), op1, op2) }
}
41055#[doc = "Subtract"]
41056#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"]
41057#[inline(always)]
41058#[target_feature(enable = "sve")]
41059#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41060#[cfg_attr(test, assert_instr(sub))]
41061pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
41062    svsub_s16_m(pg, op1, svdup_n_s16(op2))
41063}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    // "_x" ("don't care") form: inactive lanes may hold any value, so
    // delegating to the merging form is a valid implementation choice.
    svsub_s16_m(pg, op1, op2)
}
41073#[doc = "Subtract"]
41074#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"]
41075#[inline(always)]
41076#[target_feature(enable = "sve")]
41077#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41078#[cfg_attr(test, assert_instr(sub))]
41079pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
41080    svsub_s16_x(pg, op1, svdup_n_s16(op2))
41081}
41082#[doc = "Subtract"]
41083#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"]
41084#[inline(always)]
41085#[target_feature(enable = "sve")]
41086#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41087#[cfg_attr(test, assert_instr(sub))]
41088pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
41089    svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
41090}
41091#[doc = "Subtract"]
41092#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"]
41093#[inline(always)]
41094#[target_feature(enable = "sve")]
41095#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41096#[cfg_attr(test, assert_instr(sub))]
41097pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
41098    svsub_s16_z(pg, op1, svdup_n_s16(op2))
41099}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Binding to the merging (_m) LLVM SVE subtract intrinsic for 4 x i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")]
        fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // `sve_into` converts the full-width predicate to the 4-lane predicate
    // type the nxv4i32 intrinsic expects.
    unsafe { _svsub_s32_m(pg.sve_into(), op1, op2) }
}
41113#[doc = "Subtract"]
41114#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"]
41115#[inline(always)]
41116#[target_feature(enable = "sve")]
41117#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41118#[cfg_attr(test, assert_instr(sub))]
41119pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
41120    svsub_s32_m(pg, op1, svdup_n_s32(op2))
41121}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    // "_x" ("don't care") form: inactive lanes may hold any value, so
    // delegating to the merging form is a valid implementation choice.
    svsub_s32_m(pg, op1, op2)
}
41131#[doc = "Subtract"]
41132#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"]
41133#[inline(always)]
41134#[target_feature(enable = "sve")]
41135#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41136#[cfg_attr(test, assert_instr(sub))]
41137pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
41138    svsub_s32_x(pg, op1, svdup_n_s32(op2))
41139}
41140#[doc = "Subtract"]
41141#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"]
41142#[inline(always)]
41143#[target_feature(enable = "sve")]
41144#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41145#[cfg_attr(test, assert_instr(sub))]
41146pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
41147    svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
41148}
41149#[doc = "Subtract"]
41150#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"]
41151#[inline(always)]
41152#[target_feature(enable = "sve")]
41153#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41154#[cfg_attr(test, assert_instr(sub))]
41155pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
41156    svsub_s32_z(pg, op1, svdup_n_s32(op2))
41157}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_m` (merging) form: forwards to the LLVM SVE intrinsic; `sve_into`
// converts the generic predicate to the 2-lane (64-bit element) type.
pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")]
        fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svsub_s64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svsub_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svsub_s64_m(pg, op1, op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svsub_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    svsub_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_m` (merging) form: unsigned subtract reuses the signed intrinsic — two's
// complement subtraction is bit-identical for signed and unsigned lanes.
pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svsub_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svsub_u8_m(pg, op1, op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svsub_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    svsub_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_m` (merging) form: unsigned subtract reuses the signed intrinsic — two's
// complement subtraction is bit-identical for signed and unsigned lanes.
pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svsub_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svsub_u16_m(pg, op1, op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svsub_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    svsub_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_m` (merging) form: unsigned subtract reuses the signed intrinsic — two's
// complement subtraction is bit-identical for signed and unsigned lanes.
pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svsub_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svsub_u32_m(pg, op1, op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svsub_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    svsub_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_m` (merging) form: unsigned subtract reuses the signed intrinsic — two's
// complement subtraction is bit-identical for signed and unsigned lanes.
pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svsub_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svsub_u64_m(pg, op1, op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svsub_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Subtract"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sub))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    svsub_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_m` (merging) form: forwards to the LLVM SVE reversed-subtract intrinsic;
// `sve_into` converts the generic predicate to the 4-lane (32-bit) type.
pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")]
        fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svsubr_f32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svsubr_f32_m(pg, op1, svdup_n_f32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svsubr_f32_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svsubr_f32_x(pg, op1, svdup_n_f32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0.0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.0.
pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
    svsubr_f32_z(pg, op1, svdup_n_f32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_m` (merging) form: forwards to the LLVM SVE reversed-subtract intrinsic;
// `sve_into` converts the generic predicate to the 2-lane (64-bit) type.
pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")]
        fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svsubr_f64_m(pg.sve_into(), op1, op2) }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svsubr_f64_m(pg, op1, svdup_n_f64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svsubr_f64_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svsubr_f64_x(pg, op1, svdup_n_f64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0.0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.0.
pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(fsubr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
    svsubr_f64_z(pg, op1, svdup_n_f64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_m` (merging) form: the byte-element predicate has one bit per lane, so
// `svbool_t` is passed straight through — no `sve_into` conversion needed.
pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")]
        fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svsubr_s8_m(pg, op1, op2) }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svsubr_s8_m(pg, op1, svdup_n_s8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svsubr_s8_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svsubr_s8_x(pg, op1, svdup_n_s8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
    svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
    svsubr_s8_z(pg, op1, svdup_n_s8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_m` (merging) form: forwards to the LLVM SVE reversed-subtract intrinsic;
// `sve_into` converts the generic predicate to the 8-lane (16-bit) type.
pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")]
        fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svsubr_s16_m(pg.sve_into(), op1, op2) }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svsubr_s16_m(pg, op1, svdup_n_s16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svsubr_s16_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svsubr_s16_x(pg, op1, svdup_n_s16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
    svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
    svsubr_s16_z(pg, op1, svdup_n_s16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_m` (merging) form: forwards to the LLVM SVE reversed-subtract intrinsic;
// `sve_into` converts the generic predicate to the 4-lane (32-bit) type.
pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")]
        fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svsubr_s32_m(pg.sve_into(), op1, op2) }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svsubr_s32_m(pg, op1, svdup_n_s32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_x` ("don't care") form: inactive-lane results are unspecified, so the
// merging form is reused as-is.
pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svsubr_s32_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svsubr_s32_x(pg, op1, svdup_n_s32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_z` (zeroing) form: inactive lanes of `op1` are replaced with 0 via
// `svsel` first, so inactive result lanes come out of the merge as 0.
pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
    svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
// `_n` form: splat scalar `op2` into a vector and defer to the vector form.
pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
    svsubr_s32_z(pg, op1, svdup_n_s32(op2))
}
41722#[doc = "Subtract reversed"]
41723#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"]
41724#[inline(always)]
41725#[target_feature(enable = "sve")]
41726#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
41727#[cfg_attr(test, assert_instr(subr))]
41728pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
41729    unsafe extern "unadjusted" {
41730        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")]
41731        fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
41732    }
41733    unsafe { _svsubr_s64_m(pg.sve_into(), op1, op2) }
41734}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_m` form.
    svsubr_s64_m(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // `_x` ("don't care") form: implemented by delegating to the merging form.
    svsubr_s64_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_x` form.
    svsubr_s64_x(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
    // `_z` (zeroing) form: clear inactive lanes of `op1` via `svsel` against a
    // zero splat, then delegate to the merging form.
    svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_z` form.
    svsubr_s64_z(pg, op1, svdup_n_s64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Unsigned form: two's-complement subtraction is bit-identical for signed
    // and unsigned lanes, so reuse the signed implementation via
    // reinterpreting casts.
    unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_m` form.
    svsubr_u8_m(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_x` ("don't care") form: implemented by delegating to the merging form.
    svsubr_u8_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_x` form.
    svsubr_u8_x(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // `_z` (zeroing) form: clear inactive lanes of `op1` via `svsel` against a
    // zero splat, then delegate to the merging form.
    svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_z` form.
    svsubr_u8_z(pg, op1, svdup_n_u8(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Unsigned form: two's-complement subtraction is bit-identical for signed
    // and unsigned lanes, so reuse the signed implementation via
    // reinterpreting casts.
    unsafe { svsubr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_m` form.
    svsubr_u16_m(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_x` ("don't care") form: implemented by delegating to the merging form.
    svsubr_u16_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_x` form.
    svsubr_u16_x(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // `_z` (zeroing) form: clear inactive lanes of `op1` via `svsel` against a
    // zero splat, then delegate to the merging form.
    svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_z` form.
    svsubr_u16_z(pg, op1, svdup_n_u16(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Unsigned form: two's-complement subtraction is bit-identical for signed
    // and unsigned lanes, so reuse the signed implementation via
    // reinterpreting casts.
    unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_m` form.
    svsubr_u32_m(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // `_x` ("don't care") form: implemented by delegating to the merging form.
    svsubr_u32_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_x` form.
    svsubr_u32_x(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // `_z` (zeroing) form: clear inactive lanes of `op1` via `svsel` against a
    // zero splat, then delegate to the merging form.
    svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_z` form.
    svsubr_u32_z(pg, op1, svdup_n_u32(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Unsigned form: two's-complement subtraction is bit-identical for signed
    // and unsigned lanes, so reuse the signed implementation via
    // reinterpreting casts.
    unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_m` form.
    svsubr_u64_m(pg, op1, svdup_n_u64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // `_x` ("don't care") form: implemented by delegating to the merging form.
    svsubr_u64_m(pg, op1, op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_x` form.
    svsubr_u64_x(pg, op1, svdup_n_u64(op2))
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // `_z` (zeroing) form: clear inactive lanes of `op1` via `svsel` against a
    // zero splat, then delegate to the merging form.
    svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
}
#[doc = "Subtract reversed"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(subr))]
pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
    // `_n_` form: splat the scalar `op2` and reuse the vector `_z` form.
    svsubr_u64_z(pg, op1, svdup_n_u64(op2))
}
#[doc = "Dot product (signed × unsigned)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))]
pub fn svsudot_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svint8_t,
    op3: svuint8_t,
) -> svint32_t {
    // The lane index is a const generic, validated at compile time to the
    // encodable range 0..=3.
    static_assert_range!(IMM_INDEX, 0..=3);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32"
        )]
        fn _svsudot_lane_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_index: i32,
        ) -> svint32_t;
    }
    // `op3.as_signed()` only reinterprets bits: the LLVM intrinsic signature
    // takes svint8_t for the (semantically unsigned) third operand.
    unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) }
}
#[doc = "Dot product (signed × unsigned)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usdot))]
pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t {
    // sudot(signed, unsigned) is usdot(unsigned, signed) with the two
    // multiplicand operands swapped — hence the USDOT instruction is expected
    // by the test attribute above.
    svusdot_s32(op1, op3, op2)
}
#[doc = "Dot product (signed × unsigned)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usdot))]
pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t {
    // `_n_` form: splat the scalar `op3` and reuse the vector form.
    svsudot_s32(op1, op2, svdup_n_u8(op3))
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")]
        fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
    }
    // `indices.as_signed()` only reinterprets bits: the LLVM intrinsic
    // signature uses a signed index vector.
    unsafe { _svtbl_f32(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")]
        fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
    }
    // `indices.as_signed()` only reinterprets bits: the LLVM intrinsic
    // signature uses a signed index vector.
    unsafe { _svtbl_f64(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")]
        fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t;
    }
    // `indices.as_signed()` only reinterprets bits: the LLVM intrinsic
    // signature uses a signed index vector.
    unsafe { _svtbl_s8(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")]
        fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t;
    }
    // See svtbl_s8: bit-reinterpret the index vector for the LLVM signature.
    unsafe { _svtbl_s16(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")]
        fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t;
    }
    // See svtbl_s8: bit-reinterpret the index vector for the LLVM signature.
    unsafe { _svtbl_s32(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")]
        fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t;
    }
    // See svtbl_s8: bit-reinterpret the index vector for the LLVM signature.
    unsafe { _svtbl_s64(data, indices.as_signed()) }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t {
    // Unsigned form: table lookup moves lanes wholesale, so reuse the signed
    // implementation via reinterpreting casts on the data vector.
    unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t {
    // See svtbl_u8: reinterpret data lanes and reuse the signed form.
    unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t {
    // See svtbl_u8: reinterpret data lanes and reuse the signed form.
    unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Table lookup in single-vector table"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(tbl))]
pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t {
    // See svtbl_u8: reinterpret data lanes and reuse the signed form.
    unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() }
}
#[doc = "Trigonometric multiply-add coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
pub fn svtmad_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // FTMAD's 3-bit immediate is a const generic, validated at compile time
    // to the encodable range 0..=7.
    static_assert_range!(IMM3, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32"
        )]
        fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
    }
    unsafe { _svtmad_f32(op1, op2, IMM3) }
}
#[doc = "Trigonometric multiply-add coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
pub fn svtmad_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // FTMAD's 3-bit immediate is a const generic, validated at compile time
    // to the encodable range 0..=7.
    static_assert_range!(IMM3, 0..=7);
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64"
        )]
        fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
    }
    unsafe { _svtmad_f64(op1, op2, IMM3) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // 8-bit predicate granularity matches `svbool_t` (nxv16i1) directly, so
    // no predicate-type conversion is needed here (unlike the b16/b32/b64
    // variants below).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")]
        fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svtrn1_b8(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")]
        fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    // `sve_into` converts the generic predicate to the `svbool8_t` type the
    // nxv8i1 intrinsic requires, and converts the result back.
    unsafe { _svtrn1_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")]
        fn _svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    // See svtrn1_b16: convert predicates to/from the `svbool4_t` (nxv4i1) type.
    unsafe { _svtrn1_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")]
        fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    // See svtrn1_b16: convert predicates to/from the `svbool2_t` (nxv2i1) type.
    unsafe { _svtrn1_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv4f32; the types match
    // the public signature, so no conversions are needed.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")]
        fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svtrn1_f32(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv2f64.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")]
        fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svtrn1_f64(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv16i8.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")]
        fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svtrn1_s8(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv8i16.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")]
        fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svtrn1_s16(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv4i32.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")]
        fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svtrn1_s32(op1, op2) }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding to the LLVM TRN1 intrinsic for nxv2i64.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")]
        fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svtrn1_s64(op1, op2) }
}
42318#[doc = "Interleave even elements from two inputs"]
42319#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"]
42320#[inline(always)]
42321#[target_feature(enable = "sve")]
42322#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42323#[cfg_attr(test, assert_instr(trn1))]
42324pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
42325    unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
42326}
42327#[doc = "Interleave even elements from two inputs"]
42328#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"]
42329#[inline(always)]
42330#[target_feature(enable = "sve")]
42331#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42332#[cfg_attr(test, assert_instr(trn1))]
42333pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
42334    unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
42335}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN1 lane shuffle is type-agnostic.
    unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN1 lane shuffle is type-agnostic.
    unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")]
        fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_f32(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")]
        fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_f64(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")]
        fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_s8(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")]
        fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_s16(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")]
        fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_s32(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding to the LLVM SVE quadword-TRN1 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")]
        fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn1q_s64(op1, op2) }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn1))]
pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // nxv16i1 matches `svbool_t` directly, so (unlike _b16/_b32/_b64)
    // no predicate-width conversion is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")]
        fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_b8(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")]
        fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    // SAFETY: `sve_into` converts between the generic predicate `svbool_t`
    // and the halfword-granule predicate type the nxv8i1 intrinsic expects.
    unsafe { _svtrn2_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")]
        fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    // SAFETY: `sve_into` converts between the generic predicate `svbool_t`
    // and the word-granule predicate type the nxv4i1 intrinsic expects.
    unsafe { _svtrn2_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")]
        fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    // SAFETY: `sve_into` converts between the generic predicate `svbool_t`
    // and the doubleword-granule predicate type the nxv2i1 intrinsic expects.
    unsafe { _svtrn2_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")]
        fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_f32(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")]
        fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_f64(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")]
        fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_s8(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")]
        fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_s16(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")]
        fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_s32(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding to the LLVM SVE TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")]
        fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2_s64(op1, op2) }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN2 lane shuffle is type-agnostic.
    unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN2 lane shuffle is type-agnostic.
    unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN2 lane shuffle is type-agnostic.
    unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op, and the TRN2 lane shuffle is type-agnostic.
    unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")]
        fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_f32(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")]
        fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_f64(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv16i8")]
        fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_s8(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")]
        fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_s16(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")]
        fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_s32(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding to the LLVM SVE quadword-TRN2 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")]
        fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: argument and return types match the intrinsic signature exactly.
    unsafe { _svtrn2q_s64(op1, op2) }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(trn2))]
pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // SAFETY: signed/unsigned reinterpretation of same-width lanes is a
    // bit-pattern no-op; the quadword shuffle is type-agnostic.
    unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Trigonometric starting value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftsmul))]
pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32"
        )]
        fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    // SAFETY: the intrinsic is declared over signed integer lanes, so the
    // unsigned `op2` is reinterpreted (a bit-pattern no-op) before the call.
    unsafe { _svtsmul_f32(op1, op2.as_signed()) }
}
#[doc = "Trigonometric starting value"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftsmul))]
pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64"
        )]
        fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    // SAFETY: the intrinsic is declared over signed integer lanes, so the
    // unsigned `op2` is reinterpreted (a bit-pattern no-op) before the call.
    unsafe { _svtsmul_f64(op1, op2.as_signed()) }
}
#[doc = "Trigonometric select coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftssel))]
pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32"
        )]
        fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
    }
    // SAFETY: the intrinsic is declared over signed integer lanes, so the
    // unsigned `op2` is reinterpreted (a bit-pattern no-op) before the call.
    unsafe { _svtssel_f32(op1, op2.as_signed()) }
}
#[doc = "Trigonometric select coefficient"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(ftssel))]
pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64"
        )]
        fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
    }
    // SAFETY: the intrinsic is declared over signed integer lanes, so the
    // unsigned `op2` is reinterpreted (a bit-pattern no-op) before the call.
    unsafe { _svtssel_f64(op1, op2.as_signed()) }
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_f32() -> svfloat32x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_f32(svdup_n_f32(0f32), svdup_n_f32(0f32))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_f64() -> svfloat64x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_f64(svdup_n_f64(0f64), svdup_n_f64(0f64))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_s8() -> svint8x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_s8(svdup_n_s8(0), svdup_n_s8(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_s16() -> svint16x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_s16(svdup_n_s16(0), svdup_n_s16(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_s32() -> svint32x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_s32(svdup_n_s32(0), svdup_n_s32(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_s64() -> svint64x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_s64(svdup_n_s64(0), svdup_n_s64(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_u8() -> svuint8x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_u8(svdup_n_u8(0), svdup_n_u8(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_u16() -> svuint16x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_u16(svdup_n_u16(0), svdup_n_u16(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_u32() -> svuint32x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_u32(svdup_n_u32(0), svdup_n_u32(0))
}
#[doc = "Create an uninitialized tuple of two vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef2_u64() -> svuint64x2_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate2_u64(svdup_n_u64(0), svdup_n_u64(0))
}
#[doc = "Create an uninitialized tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef3_f32() -> svfloat32x3_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate3_f32(svdup_n_f32(0f32), svdup_n_f32(0f32), svdup_n_f32(0f32))
}
#[doc = "Create an uninitialized tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef3_f64() -> svfloat64x3_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate3_f64(svdup_n_f64(0f64), svdup_n_f64(0f64), svdup_n_f64(0f64))
}
#[doc = "Create an uninitialized tuple of three vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
pub unsafe fn svundef3_s8() -> svint8x3_t {
    // Despite the "uninitialized" contract, this implementation zero-fills the
    // tuple, avoiding truly uninitialized data while keeping the unsafe API.
    svcreate3_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0))
}
42942#[doc = "Create an uninitialized tuple of three vectors"]
42943#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"]
42944#[doc = "## Safety"]
42945#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42946#[inline(always)]
42947#[target_feature(enable = "sve")]
42948#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42949pub unsafe fn svundef3_s16() -> svint16x3_t {
42950    svcreate3_s16(svdup_n_s16(0), svdup_n_s16(0), svdup_n_s16(0))
42951}
42952#[doc = "Create an uninitialized tuple of three vectors"]
42953#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"]
42954#[doc = "## Safety"]
42955#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42956#[inline(always)]
42957#[target_feature(enable = "sve")]
42958#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42959pub unsafe fn svundef3_s32() -> svint32x3_t {
42960    svcreate3_s32(svdup_n_s32(0), svdup_n_s32(0), svdup_n_s32(0))
42961}
42962#[doc = "Create an uninitialized tuple of three vectors"]
42963#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"]
42964#[doc = "## Safety"]
42965#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42966#[inline(always)]
42967#[target_feature(enable = "sve")]
42968#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42969pub unsafe fn svundef3_s64() -> svint64x3_t {
42970    svcreate3_s64(svdup_n_s64(0), svdup_n_s64(0), svdup_n_s64(0))
42971}
42972#[doc = "Create an uninitialized tuple of three vectors"]
42973#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"]
42974#[doc = "## Safety"]
42975#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42976#[inline(always)]
42977#[target_feature(enable = "sve")]
42978#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42979pub unsafe fn svundef3_u8() -> svuint8x3_t {
42980    svcreate3_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0))
42981}
42982#[doc = "Create an uninitialized tuple of three vectors"]
42983#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"]
42984#[doc = "## Safety"]
42985#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42986#[inline(always)]
42987#[target_feature(enable = "sve")]
42988#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42989pub unsafe fn svundef3_u16() -> svuint16x3_t {
42990    svcreate3_u16(svdup_n_u16(0), svdup_n_u16(0), svdup_n_u16(0))
42991}
42992#[doc = "Create an uninitialized tuple of three vectors"]
42993#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"]
42994#[doc = "## Safety"]
42995#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
42996#[inline(always)]
42997#[target_feature(enable = "sve")]
42998#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
42999pub unsafe fn svundef3_u32() -> svuint32x3_t {
43000    svcreate3_u32(svdup_n_u32(0), svdup_n_u32(0), svdup_n_u32(0))
43001}
43002#[doc = "Create an uninitialized tuple of three vectors"]
43003#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"]
43004#[doc = "## Safety"]
43005#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
43006#[inline(always)]
43007#[target_feature(enable = "sve")]
43008#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
43009pub unsafe fn svundef3_u64() -> svuint64x3_t {
43010    svcreate3_u64(svdup_n_u64(0), svdup_n_u64(0), svdup_n_u64(0))
43011}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_f32() -> svfloat32x4_t {
    svcreate4_f32(
        svdup_n_f32(0f32),
        svdup_n_f32(0f32),
        svdup_n_f32(0f32),
        svdup_n_f32(0f32),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_f64() -> svfloat64x4_t {
    svcreate4_f64(
        svdup_n_f64(0f64),
        svdup_n_f64(0f64),
        svdup_n_f64(0f64),
        svdup_n_f64(0f64),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_s8() -> svint8x4_t {
    svcreate4_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0))
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_s16() -> svint16x4_t {
    svcreate4_s16(
        svdup_n_s16(0),
        svdup_n_s16(0),
        svdup_n_s16(0),
        svdup_n_s16(0),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_s32() -> svint32x4_t {
    svcreate4_s32(
        svdup_n_s32(0),
        svdup_n_s32(0),
        svdup_n_s32(0),
        svdup_n_s32(0),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_s64() -> svint64x4_t {
    svcreate4_s64(
        svdup_n_s64(0),
        svdup_n_s64(0),
        svdup_n_s64(0),
        svdup_n_s64(0),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_u8() -> svuint8x4_t {
    svcreate4_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0))
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_u16() -> svuint16x4_t {
    svcreate4_u16(
        svdup_n_u16(0),
        svdup_n_u16(0),
        svdup_n_u16(0),
        svdup_n_u16(0),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_u32() -> svuint32x4_t {
    svcreate4_u32(
        svdup_n_u32(0),
        svdup_n_u32(0),
        svdup_n_u32(0),
        svdup_n_u32(0),
    )
}
#[doc = "Create an uninitialized tuple of four vectors"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented by zero-filling all four vectors, so the value is actually initialized.
pub unsafe fn svundef4_u64() -> svuint64x4_t {
    svcreate4_u64(
        svdup_n_u64(0),
        svdup_n_u64(0),
        svdup_n_u64(0),
        svdup_n_u64(0),
    )
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_f32() -> svfloat32_t {
    svdup_n_f32(0f32)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_f64() -> svfloat64_t {
    svdup_n_f64(0f64)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_s8() -> svint8_t {
    svdup_n_s8(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_s16() -> svint16_t {
    svdup_n_s16(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_s32() -> svint32_t {
    svdup_n_s32(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_s64() -> svint64_t {
    svdup_n_s64(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_u8() -> svuint8_t {
    svdup_n_u8(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_u16() -> svuint16_t {
    svdup_n_u16(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_u32() -> svuint32_t {
    svdup_n_u32(0)
}
#[doc = "Create an uninitialized vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"]
#[doc = "## Safety"]
#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
// Implemented as a zero-filled vector, so the value is actually initialized.
pub unsafe fn svundef_u64() -> svuint64_t {
    svdup_n_u64(0)
}
#[doc = "Dot product (unsigned × signed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))]
pub fn svusdot_lane_s32<const IMM_INDEX: i32>(
    op1: svint32_t,
    op2: svuint8_t,
    op3: svint8_t,
) -> svint32_t {
    // The lane index is validated at compile time: only 0..=3 is accepted.
    static_assert_range!(IMM_INDEX, 0..=3);
    // Binding for the raw LLVM SVE intrinsic; note it takes a signed op2.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32"
        )]
        fn _svusdot_lane_s32(
            op1: svint32_t,
            op2: svint8_t,
            op3: svint8_t,
            imm_index: i32,
        ) -> svint32_t;
    }
    // SAFETY: the required "sve,i8mm" target features are enabled on this
    // function; op2 is converted with as_signed only to match the intrinsic's
    // declared signed parameter type.
    unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) }
}
#[doc = "Dot product (unsigned × signed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usdot))]
pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
    // Binding for the raw LLVM SVE intrinsic; note it takes a signed op2.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")]
        fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
    }
    // SAFETY: the required "sve,i8mm" target features are enabled on this
    // function; op2 is converted with as_signed only to match the intrinsic's
    // declared signed parameter type.
    unsafe { _svusdot_s32(op1, op2.as_signed(), op3) }
}
#[doc = "Dot product (unsigned × signed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usdot))]
// Scalar variant: broadcasts op3 across a vector and defers to the vector form.
pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t {
    svusdot_s32(op1, op2, svdup_n_s8(op3))
}
#[doc = "Matrix multiply-accumulate (unsigned × signed)"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,i8mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(usmmla))]
pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
    // Binding for the raw LLVM SVE intrinsic; note it takes a signed op2.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usmmla.nxv4i32")]
        fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
    }
    // SAFETY: the required "sve,i8mm" target features are enabled on this
    // function; op2 is converted with as_signed only to match the intrinsic's
    // declared signed parameter type.
    unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // svbool_t already matches the nxv16i1 intrinsic signature, so no
    // predicate-width conversion is needed here (unlike b16/b32/b64 below).
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")]
        fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled on
    // this function.
    unsafe { _svuzp1_b8(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")]
        fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    // SAFETY: the SVE target feature is enabled on this function; sve_into
    // converts between the generic svbool_t and the width-specific predicate
    // type (svbool8_t) required by the nxv8i1 intrinsic signature.
    unsafe { _svuzp1_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")]
        fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    // SAFETY: the SVE target feature is enabled on this function; sve_into
    // converts between svbool_t and the svbool4_t type required by the
    // nxv4i1 intrinsic signature.
    unsafe { _svuzp1_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")]
        fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    // SAFETY: the SVE target feature is enabled on this function; sve_into
    // converts between svbool_t and the svbool2_t type required by the
    // nxv2i1 intrinsic signature.
    unsafe { _svuzp1_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding: argument types already match the nxv4f32 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")]
        fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled on
    // this function.
    unsafe { _svuzp1_f32(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding: argument types already match the nxv2f64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")]
        fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled on
    // this function.
    unsafe { _svuzp1_f64(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding: argument types already match the nxv16i8 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")]
        fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled.
    unsafe { _svuzp1_s8(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding: argument types already match the nxv8i16 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")]
        fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled.
    unsafe { _svuzp1_s16(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding: argument types already match the nxv4i32 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")]
        fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled.
    unsafe { _svuzp1_s32(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding: argument types already match the nxv2i64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")]
        fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the SVE target feature required by the intrinsic is enabled.
    unsafe { _svuzp1_s64(op1, op2) }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Reuses the signed implementation; UZP1 is signedness-agnostic, so the
    // arguments/result are only converted via as_signed / as_unsigned.
    unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Reuses the signed implementation via as_signed / as_unsigned conversions.
    unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Reuses the signed implementation via as_signed / as_unsigned conversions.
    unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Reuses the signed implementation via as_signed / as_unsigned conversions.
    unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Direct binding: argument types already match the nxv4f32 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")]
        fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_f32(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Direct binding: argument types already match the nxv2f64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")]
        fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_f64(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Direct binding: argument types already match the nxv16i8 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")]
        fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_s8(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Direct binding: argument types already match the nxv8i16 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")]
        fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_s16(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Direct binding: argument types already match the nxv4i32 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")]
        fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_s32(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Direct binding: argument types already match the nxv2i64 intrinsic.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")]
        fn _svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: the "sve,f64mm" target features required by the intrinsic are
    // enabled on this function.
    unsafe { _svuzp1q_s64(op1, op2) }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // UZP1 only moves lanes, so the unsigned form reuses the signed intrinsic
    // with bitwise reinterprets (`as_signed`/`as_unsigned`) on each side.
    unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Lane-reinterpreting forward to the signed variant (see `svuzp1q_u8`).
    unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate even quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp1))]
pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // nxv16i1 is the full-width predicate, so no conversion is needed here.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")]
        fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_b8(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // The intrinsic takes the 16-bit-element predicate type (nxv8i1);
    // `sve_into` converts between it and the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")]
        fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // 32-bit-element predicate variant (nxv4i1); converted via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")]
        fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    // 64-bit-element predicate variant (nxv2i1); converted via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")]
        fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")]
        fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_f32(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")]
        fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_f64(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")]
        fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_s8(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")]
        fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_s16(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")]
        fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_s32(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")]
        fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svuzp2_s64(op1, op2) }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // UZP2 only moves lanes, so the unsigned form reuses the signed intrinsic
    // with bitwise reinterprets (`as_signed`/`as_unsigned`) on each side.
    unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Lane-reinterpreting forward to the signed variant (see `svuzp2_u8`).
    unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd elements from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Quadword-granule variant: requires FEAT_F64MM, hence `sve,f64mm` above.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")]
        fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_f32(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")]
        fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_f64(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")]
        fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_s8(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")]
        fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_s16(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")]
        fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_s32(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")]
        fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: `sve,f64mm` are enabled via `#[target_feature]` above.
    unsafe { _svuzp2q_s64(op1, op2) }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Quadword UZP2 only moves data, so the unsigned form reuses the signed
    // intrinsic with bitwise reinterprets (`as_signed`/`as_unsigned`).
    unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Lane-reinterpreting forward to the signed variant (see `svuzp2q_u8`).
    unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Concatenate odd quadwords from two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(uzp2))]
pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Lane-reinterpreting forward to the signed variant.
    unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32"
        )]
        fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b8_s32(op1, op2) }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t {
    // The intrinsic returns the 16-bit-element predicate (nxv8i1);
    // `sve_into` widens it to the generic `svbool_t`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32"
        )]
        fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b16_s32(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32"
        )]
        fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b32_s32(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32"
        )]
        fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b64_s32(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64"
        )]
        fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b8_s64(op1, op2) }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t {
    // Narrow predicate result (nxv8i1) is widened to `svbool_t` via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64"
        )]
        fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b16_s64(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64"
        )]
        fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b32_s64(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilele))]
pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64"
        )]
        fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b64_s64(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t {
    // Unsigned comparison lowers to WHILELS (unsigned "lower or same"), hence
    // the `whilels` intrinsic; operands are reinterpreted to match its i32 ABI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32"
        )]
        fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t {
    // WHILELS variant (see `svwhilele_b8_u32`); narrow predicate widened via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32"
        )]
        fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32"
        )]
        fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32"
        )]
        fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t {
    // Unsigned comparison lowers to WHILELS; operands are bit-reinterpreted
    // (`as_signed`) to match the intrinsic's i64 ABI.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64"
        )]
        fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64"
        )]
        fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64"
        )]
        fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than or equal to"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilels))]
pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64"
        )]
        fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32"
        )]
        fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilelt_b8_s32(op1, op2) }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t {
    // Narrow predicate result (nxv8i1) is widened to `svbool_t` via `sve_into`.
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32"
        )]
        fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilelt_b16_s32(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32"
        )]
        fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilelt_b32_s32(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32"
        )]
        fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
    }
    // SAFETY: SVE is enabled via `#[target_feature]` on this function.
    unsafe { _svwhilelt_b64_s32(op1, op2).sve_into() }
}
// `svwhilelt_b{8,16,32,64}_s64`: 64-bit-counter variants of the `whilelt`
// predicate constructors. Identical in structure to the `_s32` family, but
// the scalar bounds are `i64` and the intrinsics carry an `.i64` suffix.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64"
        )]
        fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilelt_b8_s64(op1, op2) }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64"
        )]
        fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
    }
    unsafe { _svwhilelt_b16_s64(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64"
        )]
        fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
    }
    unsafe { _svwhilelt_b32_s64(op1, op2).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelt))]
pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64"
        )]
        fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
    }
    unsafe { _svwhilelt_b64_s64(op1, op2).sve_into() }
}
// `svwhilelt_b{8,16,32,64}_u32`: unsigned-counter `whilelt` forms. These
// lower to the unsigned lower-than instruction `whilelo` (note the
// `assert_instr(whilelo)` attribute and the `whilelo` link names). The LLVM
// intrinsics are declared with signed parameters, so the `u32` arguments are
// bit-reinterpreted with `as_signed()` before the call; widths other than 8
// also convert the width-specific predicate result back with `sve_into()`.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32"
        )]
        fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t;
    }
    unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32"
        )]
        fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
    }
    unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32"
        )]
        fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
    }
    unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32"
        )]
        fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
    }
    unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() }
}
// `svwhilelt_b{8,16,32,64}_u64`: unsigned 64-bit-counter `whilelt` forms,
// lowering to `whilelo` like the `_u32` family but with `i64` intrinsic
// parameters. `u64` arguments are bit-reinterpreted via `as_signed()` to
// match the intrinsic signatures.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64"
        )]
        fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t;
    }
    unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64"
        )]
        fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t;
    }
    unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64"
        )]
        fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t;
    }
    unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
#[doc = "While incrementing scalar is less than"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(whilelo))]
pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(
            target_arch = "aarch64",
            link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64"
        )]
        fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t;
    }
    unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() }
}
// `svwrffr`: write predicate `op` to the first-fault register (FFR).
// Side-effect-only intrinsic — no return value.
#[doc = "Write to the first-fault register"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(wrffr))]
pub fn svwrffr(op: svbool_t) {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")]
        fn _svwrffr(op: svbool_t);
    }
    // SAFETY: requires only the "sve" target feature, enabled above.
    unsafe { _svwrffr(op) }
}
// `svzip1_b{8,16,32,64}`: interleave predicate elements from the low halves
// of `op1` and `op2`. The 8-bit form operates on `svbool_t` directly; the
// wider forms narrow both inputs to the width-specific predicate type with
// `sve_into()`, call the matching LLVM intrinsic, and widen the result back.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")]
        fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svzip1_b8(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")]
        fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    unsafe { _svzip1_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")]
        fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    unsafe { _svzip1_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")]
        fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    unsafe { _svzip1_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
// `svzip1` for concrete element types (f32, f64, s8, s16, s32, s64):
// interleave elements from the low halves of the two input vectors. Each
// element type maps 1:1 onto an LLVM intrinsic specialised for that scalable
// vector shape (e.g. `nxv4f32`, `nxv16i8`). The unsigned variants further
// below delegate to these signed implementations.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")]
        fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svzip1_f32(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")]
        fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svzip1_f64(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")]
        fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svzip1_s8(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")]
        fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svzip1_s16(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")]
        fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svzip1_s32(op1, op2) }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")]
        fn _svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svzip1_s64(op1, op2) }
}
// Unsigned `svzip1` forms: element interleaving is a lane rearrangement that
// does not depend on signedness, so each unsigned variant delegates to the
// signed implementation of the same width, bit-reinterpreting the inputs with
// `as_signed()` and the result with `as_unsigned()`.
// SAFETY (all `unsafe` blocks below): the reinterpreting casts only change the
// nominal element type; the delegated calls require only "sve", which each
// function enables via `#[target_feature]`.
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
// `svzip1q`: interleave 128-bit quadwords (rather than individual elements)
// from the low halves of the inputs — note the `zip1q` LLVM intrinsic names.
// Stronger feature requirement than plain `svzip1`: these enable "f64mm" in
// addition to "sve" (see `#[target_feature]`).
// SAFETY (all `unsafe` blocks below): the intrinsic calls require the "sve"
// and "f64mm" target features, both enabled on each function.
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")]
        fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    unsafe { _svzip1q_f32(op1, op2) }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")]
        fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    unsafe { _svzip1q_f64(op1, op2) }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")]
        fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    unsafe { _svzip1q_s8(op1, op2) }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")]
        fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    unsafe { _svzip1q_s16(op1, op2) }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")]
        fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    unsafe { _svzip1q_s32(op1, op2) }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")]
        fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    unsafe { _svzip1q_s64(op1, op2) }
}
// Unsigned `svzip1q` forms: like the unsigned `svzip1` variants, these
// delegate to the signed implementations of the same width through
// bit-reinterpreting `as_signed()`/`as_unsigned()` casts. They inherit the
// "sve,f64mm" feature requirement of the quadword interleave.
// SAFETY (all `unsafe` blocks below): the casts only change the nominal
// element type; the delegated calls require "sve" and "f64mm", both enabled
// on each function via `#[target_feature]`.
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from low halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip1))]
pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
// `svzip2_b{8,16,32,64}`: mirror of `svzip1_b*`, but interleaving predicate
// elements from the HIGH halves of `op1` and `op2` (the `zip2` intrinsics).
// The 8-bit form works on `svbool_t` directly; wider forms convert through
// the width-specific predicate types with `sve_into()`.
// SAFETY (all `unsafe` blocks below): the intrinsic calls require only the
// "sve" target feature, enabled on each function via `#[target_feature]`.
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")]
        fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
    }
    unsafe { _svzip2_b8(op1, op2) }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")]
        fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
    }
    unsafe { _svzip2_b16(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")]
        fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
    }
    unsafe { _svzip2_b32(op1.sve_into(), op2.sve_into()).sve_into() }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")]
        fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
    }
    unsafe { _svzip2_b64(op1.sve_into(), op2.sve_into()).sve_into() }
}
44730#[doc = "Interleave elements from high halves of two inputs"]
44731#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"]
44732#[inline(always)]
44733#[target_feature(enable = "sve")]
44734#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
44735#[cfg_attr(test, assert_instr(zip2))]
44736pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
44737    unsafe extern "unadjusted" {
44738        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")]
44739        fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
44740    }
44741    unsafe { _svzip2_f32(op1, op2) }
44742}
44743#[doc = "Interleave elements from high halves of two inputs"]
44744#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"]
44745#[inline(always)]
44746#[target_feature(enable = "sve")]
44747#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
44748#[cfg_attr(test, assert_instr(zip2))]
44749pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
44750    unsafe extern "unadjusted" {
44751        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")]
44752        fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
44753    }
44754    unsafe { _svzip2_f64(op1, op2) }
44755}
44756#[doc = "Interleave elements from high halves of two inputs"]
44757#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"]
44758#[inline(always)]
44759#[target_feature(enable = "sve")]
44760#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
44761#[cfg_attr(test, assert_instr(zip2))]
44762pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
44763    unsafe extern "unadjusted" {
44764        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")]
44765        fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
44766    }
44767    unsafe { _svzip2_s8(op1, op2) }
44768}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv8i16` is a scalable vector of i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")]
        fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` feature is guaranteed by `#[target_feature]` on this function.
    unsafe { _svzip2_s16(op1, op2) }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv4i32` is a scalable vector of i32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")]
        fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` feature is guaranteed by `#[target_feature]` on this function.
    unsafe { _svzip2_s32(op1, op2) }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv2i64` is a scalable vector of i64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")]
        fn _svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` feature is guaranteed by `#[target_feature]` on this function.
    unsafe { _svzip2_s64(op1, op2) }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Reuses the signed variant: `zip2` only permutes lanes, so a signed <->
    // unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee
    // needs only the `sve` feature, which is enabled on this function.
    unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Reuses the signed variant: `zip2` only permutes lanes, so a signed <->
    // unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee
    // needs only the `sve` feature, which is enabled on this function.
    unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Reuses the signed variant: `zip2` only permutes lanes, so a signed <->
    // unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee
    // needs only the `sve` feature, which is enabled on this function.
    unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave elements from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Reuses the signed variant: `zip2` only permutes lanes, so a signed <->
    // unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee
    // needs only the `sve` feature, which is enabled on this function.
    unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv4f32` is a scalable vector of f32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")]
        fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_f32(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv2f64` is a scalable vector of f64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")]
        fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_f64(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv16i8` is a scalable vector of i8 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")]
        fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_s8(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv8i16` is a scalable vector of i16 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")]
        fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_s16(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv4i32` is a scalable vector of i32 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")]
        fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_s32(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
    // Raw binding to the LLVM SVE intrinsic; `nxv2i64` is a scalable vector of i64 lanes.
    unsafe extern "unadjusted" {
        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")]
        fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
    }
    // SAFETY: FFI call whose Rust signature matches the declaration above; the
    // required `sve` and `f64mm` features are guaranteed by `#[target_feature]`.
    unsafe { _svzip2q_s64(op1, op2) }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
    // Reuses the signed variant: `zip2q` only permutes quadwords, so a signed
    // <-> unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee's
    // required features (`sve`, `f64mm`) are enabled on this function too.
    unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
    // Reuses the signed variant: `zip2q` only permutes quadwords, so a signed
    // <-> unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee's
    // required features (`sve`, `f64mm`) are enabled on this function too.
    unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
    // Reuses the signed variant: `zip2q` only permutes quadwords, so a signed
    // <-> unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee's
    // required features (`sve`, `f64mm`) are enabled on this function too.
    unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
}
#[doc = "Interleave quadwords from high halves of two inputs"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"]
#[inline(always)]
#[target_feature(enable = "sve,f64mm")]
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]
#[cfg_attr(test, assert_instr(zip2))]
pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
    // Reuses the signed variant: `zip2q` only permutes quadwords, so a signed
    // <-> unsigned lane reinterpretation round-trip preserves the bits.
    // SAFETY: `as_signed`/`as_unsigned` convert between same-width lane types
    // (consistent with their use throughout this generated file); the callee's
    // required features (`sve`, `f64mm`) are enabled on this function too.
    unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
}