#[allow(improper_ctypes)]
unsafe extern "unadjusted" {
    // Raw bindings to AMDGCN LLVM intrinsics. Each `link_name` selects the
    // intrinsic by its LLVM name, and the "unadjusted" ABI forwards the
    // arguments without Rust's usual FFI lowering. Items declared `safe` may
    // be called outside an `unsafe` block; the remaining items carry caller
    // obligations that are documented on the public wrappers below.

    // Workitem (thread) index within the workgroup, one intrinsic per axis.
    #[link_name = "llvm.amdgcn.workitem.id.x"]
    safe fn llvm_workitem_id_x() -> u32;
    #[link_name = "llvm.amdgcn.workitem.id.y"]
    safe fn llvm_workitem_id_y() -> u32;
    #[link_name = "llvm.amdgcn.workitem.id.z"]
    safe fn llvm_workitem_id_z() -> u32;

    // Workgroup index within the dispatch, one intrinsic per axis.
    #[link_name = "llvm.amdgcn.workgroup.id.x"]
    safe fn llvm_workgroup_id_x() -> u32;
    #[link_name = "llvm.amdgcn.workgroup.id.y"]
    safe fn llvm_workgroup_id_y() -> u32;
    #[link_name = "llvm.amdgcn.workgroup.id.z"]
    safe fn llvm_workgroup_id_z() -> u32;

    // Static group (LDS) allocation size and the id of the current dispatch.
    #[link_name = "llvm.amdgcn.groupstaticsize"]
    safe fn llvm_groupstaticsize() -> u32;
    #[link_name = "llvm.amdgcn.dispatch.id"]
    safe fn llvm_dispatch_id() -> u64;

    // Number of lanes in a wavefront.
    #[link_name = "llvm.amdgcn.wavefrontsize"]
    safe fn llvm_wavefrontsize() -> u32;

    // Barrier and scheduling primitives. The `barrier_type`/`mask` arguments
    // are restricted by the public wrappers (const generics plus
    // `static_assert`), which is why several of these are not `safe`.
    #[link_name = "llvm.amdgcn.s.barrier"]
    safe fn llvm_s_barrier();
    #[link_name = "llvm.amdgcn.s.barrier.signal"]
    fn llvm_s_barrier_signal(barrier_type: i32);
    #[link_name = "llvm.amdgcn.s.barrier.signal.isfirst"]
    fn llvm_s_barrier_signal_isfirst(barrier_type: i32) -> bool;
    #[link_name = "llvm.amdgcn.s.barrier.wait"]
    fn llvm_s_barrier_wait(barrier_type: i16);
    #[link_name = "llvm.amdgcn.s.get.barrier.state"]
    fn llvm_s_get_barrier_state(barrier_type: i32) -> u32;
    #[link_name = "llvm.amdgcn.wave.barrier"]
    safe fn llvm_wave_barrier();
    #[link_name = "llvm.amdgcn.sched.barrier"]
    fn llvm_sched_barrier(mask: u32);
    #[link_name = "llvm.amdgcn.sched.group.barrier"]
    fn llvm_sched_group_barrier(mask: u32, size: u32, sync_id: u32);

    // Sleep for a duration selected by `count`.
    #[link_name = "llvm.amdgcn.s.sleep"]
    safe fn llvm_s_sleep(count: u32);

    // Halts the wave; declared diverging (`!`).
    #[link_name = "llvm.amdgcn.s.sethalt"]
    safe fn llvm_s_sethalt(value: u32) -> !;

    // Current program counter.
    #[link_name = "llvm.amdgcn.s.getpc"]
    safe fn llvm_s_getpc() -> i64;

    // Masked bit counts (low/high halves), seeded with `init`.
    #[link_name = "llvm.amdgcn.mbcnt.lo"]
    safe fn llvm_mbcnt_lo(value: u32, init: u32) -> u32;
    #[link_name = "llvm.amdgcn.mbcnt.hi"]
    safe fn llvm_mbcnt_hi(value: u32, init: u32) -> u32;

    // Cross-lane vote: per-lane bool -> lane bitmask, and the inverse.
    #[link_name = "llvm.amdgcn.ballot"]
    safe fn llvm_ballot(b: bool) -> u64;

    #[link_name = "llvm.amdgcn.inverse.ballot"]
    safe fn llvm_inverse_ballot(value: u64) -> bool;

    // Whole-wave reductions. `strategy` selects the lowering; the public
    // wrappers restrict it to `0..=2`. `u`-prefixed variants are the
    // unsigned min/max, the unprefixed min/max operate on `i32`.
    #[link_name = "llvm.amdgcn.wave.reduce.umin"]
    safe fn llvm_wave_reduce_umin(value: u32, strategy: u32) -> u32;
    #[link_name = "llvm.amdgcn.wave.reduce.min"]
    safe fn llvm_wave_reduce_min(value: i32, strategy: u32) -> i32;
    #[link_name = "llvm.amdgcn.wave.reduce.umax"]
    safe fn llvm_wave_reduce_umax(value: u32, strategy: u32) -> u32;
    #[link_name = "llvm.amdgcn.wave.reduce.max"]
    safe fn llvm_wave_reduce_max(value: i32, strategy: u32) -> i32;
    #[link_name = "llvm.amdgcn.wave.reduce.add"]
    safe fn llvm_wave_reduce_add(value: u32, strategy: u32) -> u32;
    #[link_name = "llvm.amdgcn.wave.reduce.and"]
    safe fn llvm_wave_reduce_and(value: u32, strategy: u32) -> u32;
    #[link_name = "llvm.amdgcn.wave.reduce.or"]
    safe fn llvm_wave_reduce_or(value: u32, strategy: u32) -> u32;
    #[link_name = "llvm.amdgcn.wave.reduce.xor"]
    safe fn llvm_wave_reduce_xor(value: u32, strategy: u32) -> u32;

    // Cross-lane register reads/writes. The `readlane`/`writelane` forms take
    // an explicit lane index and are therefore not declared `safe`.
    #[link_name = "llvm.amdgcn.readfirstlane.i32"]
    safe fn llvm_readfirstlane_u32(value: u32) -> u32;
    #[link_name = "llvm.amdgcn.readfirstlane.i64"]
    safe fn llvm_readfirstlane_u64(value: u64) -> u64;
    #[link_name = "llvm.amdgcn.readlane.i32"]
    fn llvm_readlane_u32(value: u32, lane: u32) -> u32;
    #[link_name = "llvm.amdgcn.readlane.i64"]
    fn llvm_readlane_u64(value: u64, lane: u32) -> u64;
    #[link_name = "llvm.amdgcn.writelane.i32"]
    fn llvm_writelane_u32(value: u32, lane: u32, default: u32) -> u32;
    #[link_name = "llvm.amdgcn.writelane.i64"]
    fn llvm_writelane_u64(value: u64, lane: u32, default: u64) -> u64;

    // Ends the program; declared diverging (`!`).
    #[link_name = "llvm.amdgcn.endpgm"]
    safe fn llvm_endpgm() -> !;

    // DPP lane shuffle; the control constants are supplied as const generics
    // by the public wrapper.
    #[link_name = "llvm.amdgcn.update.dpp.i32"]
    fn llvm_update_dpp(
        old: u32,
        src: u32,
        dpp_ctrl: u32,
        row_mask: u32,
        bank_mask: u32,
        bound_control: bool,
    ) -> u32;

    // Real-time counter.
    #[link_name = "llvm.amdgcn.s.memrealtime"]
    safe fn llvm_s_memrealtime() -> u64;

    // Data-share lane permutes and the byte-permute selector.
    #[link_name = "llvm.amdgcn.ds.permute"]
    fn llvm_ds_permute(lane: u32, value: u32) -> u32;
    #[link_name = "llvm.amdgcn.ds.bpermute"]
    fn llvm_ds_bpermute(lane: u32, value: u32) -> u32;
    #[link_name = "llvm.amdgcn.perm"]
    fn llvm_perm(src0: u32, src1: u32, selector: u32) -> u32;

    // permlane16 family: 16-lane permutes with `fi` (fetch-inactive) and
    // `bound_control` flags passed as const generics by the wrappers.
    #[link_name = "llvm.amdgcn.permlane16.i32"]
    fn llvm_permlane16_u32(
        old: u32,
        src0: u32,
        src1: u32,
        src2: u32,
        fi: bool,
        bound_control: bool,
    ) -> u32;

    #[link_name = "llvm.amdgcn.permlanex16.i32"]
    fn llvm_permlanex16_u32(
        old: u32,
        src0: u32,
        src1: u32,
        src2: u32,
        fi: bool,
        bound_control: bool,
    ) -> u32;

    // Index of the wave within its workgroup.
    #[link_name = "llvm.amdgcn.s.get.waveid.in.workgroup"]
    safe fn llvm_s_get_waveid_in_workgroup() -> u32;

    #[link_name = "llvm.amdgcn.permlane64.i32"]
    fn llvm_permlane64_u32(value: u32) -> u32;

    // Variable-index variants of the 16-lane permutes.
    #[link_name = "llvm.amdgcn.permlane16.var"]
    fn llvm_permlane16_var(old: u32, src0: u32, src1: u32, fi: bool, bound_control: bool) -> u32;

    #[link_name = "llvm.amdgcn.permlanex16.var"]
    fn llvm_permlanex16_var(old: u32, src0: u32, src1: u32, fi: bool, bound_control: bool) -> u32;

    // Id of the current wave.
    #[link_name = "llvm.amdgcn.wave.id"]
    safe fn llvm_wave_id() -> u32;

    // Swap variants returning both halves as a pair.
    #[link_name = "llvm.amdgcn.permlane16.swap"]
    fn llvm_permlane16_swap(
        vdst_old: u32,
        vsrc_src0: u32,
        fi: bool,
        bound_control: bool,
    ) -> (u32, u32);

    #[link_name = "llvm.amdgcn.permlane32.swap"]
    fn llvm_permlane32_swap(
        vdst_old: u32,
        vsrc_src0: u32,
        fi: bool,
        bound_control: bool,
    ) -> (u32, u32);
}
183
/// Returns the workitem (thread) id in the x dimension of the workgroup
/// (`llvm.amdgcn.workitem.id.x`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workitem_id_x() -> u32 {
    llvm_workitem_id_x()
}
/// Returns the workitem (thread) id in the y dimension of the workgroup
/// (`llvm.amdgcn.workitem.id.y`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workitem_id_y() -> u32 {
    llvm_workitem_id_y()
}
/// Returns the workitem (thread) id in the z dimension of the workgroup
/// (`llvm.amdgcn.workitem.id.z`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workitem_id_z() -> u32 {
    llvm_workitem_id_z()
}
202
/// Returns the workgroup id in the x dimension of the dispatch
/// (`llvm.amdgcn.workgroup.id.x`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workgroup_id_x() -> u32 {
    llvm_workgroup_id_x()
}
/// Returns the workgroup id in the y dimension of the dispatch
/// (`llvm.amdgcn.workgroup.id.y`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workgroup_id_y() -> u32 {
    llvm_workgroup_id_y()
}
/// Returns the workgroup id in the z dimension of the dispatch
/// (`llvm.amdgcn.workgroup.id.z`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn workgroup_id_z() -> u32 {
    llvm_workgroup_id_z()
}
221
/// Returns the size of the statically allocated group (LDS) memory
/// (`llvm.amdgcn.groupstaticsize`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn groupstaticsize() -> u32 {
    llvm_groupstaticsize()
}
/// Returns the id of the current dispatch (`llvm.amdgcn.dispatch.id`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn dispatch_id() -> u64 {
    llvm_dispatch_id()
}
234
/// Returns the number of lanes in a wavefront
/// (`llvm.amdgcn.wavefrontsize`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wavefrontsize() -> u32 {
    llvm_wavefrontsize()
}
243
/// Synchronizes all workitems of the workgroup (`llvm.amdgcn.s.barrier`).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_barrier() {
    llvm_s_barrier()
}
254
/// Signals the barrier selected by `BARRIER_TYPE`
/// (`llvm.amdgcn.s.barrier.signal`).
///
/// # Safety
///
/// NOTE(review): the valid values for `BARRIER_TYPE` and any execution
/// requirements are not visible in this file — confirm against the LLVM
/// AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn s_barrier_signal<const BARRIER_TYPE: i32>() {
    unsafe { llvm_s_barrier_signal(BARRIER_TYPE) }
}
265
/// Signals the barrier selected by `BARRIER_TYPE` and returns a per-wave
/// boolean (`llvm.amdgcn.s.barrier.signal.isfirst`; presumably whether this
/// wave was first to signal — confirm against LLVM docs).
///
/// # Safety
///
/// NOTE(review): the valid values for `BARRIER_TYPE` are not visible in this
/// file — confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn s_barrier_signal_isfirst<const BARRIER_TYPE: i32>() -> bool {
    unsafe { llvm_s_barrier_signal_isfirst(BARRIER_TYPE) }
}
279
/// Waits on the barrier selected by `BARRIER_TYPE`
/// (`llvm.amdgcn.s.barrier.wait`). Note the intrinsic takes an `i16` here,
/// unlike the `i32` used by the signal/state intrinsics.
///
/// # Safety
///
/// NOTE(review): the valid values for `BARRIER_TYPE` are not visible in this
/// file — confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn s_barrier_wait<const BARRIER_TYPE: i16>() {
    unsafe { llvm_s_barrier_wait(BARRIER_TYPE) }
}
290
/// Returns the state of the barrier selected by `BARRIER_TYPE`
/// (`llvm.amdgcn.s.get.barrier.state`).
///
/// # Safety
///
/// NOTE(review): the valid values for `BARRIER_TYPE` and the meaning of the
/// returned bits are not visible in this file — confirm against the LLVM
/// AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn s_get_barrier_state<const BARRIER_TYPE: i32>() -> u32 {
    unsafe { llvm_s_get_barrier_state(BARRIER_TYPE) }
}
301
/// Barrier over the lanes of the current wave (`llvm.amdgcn.wave.barrier`).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_barrier() {
    llvm_wave_barrier()
}
312
/// Instruction-scheduling barrier (`llvm.amdgcn.sched.barrier`). `MASK`
/// selects which instruction classes may be scheduled across it; only the
/// low 11 bits are valid (enforced at compile time below).
///
/// # Safety
///
/// NOTE(review): the meaning of individual `MASK` bits is not visible in this
/// file — confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn sched_barrier<const MASK: u32>() {
    // Compile-time check: MASK must fit in 11 bits.
    static_assert_uimm_bits!(MASK, 11);
    unsafe { llvm_sched_barrier(MASK) }
}
338
/// Grouped instruction-scheduling barrier (`llvm.amdgcn.sched.group.barrier`)
/// parameterized by an 11-bit `MASK`, a group `SIZE`, and a `SYNC_ID`.
///
/// # Safety
///
/// NOTE(review): the semantics of `MASK`/`SIZE`/`SYNC_ID` are not visible in
/// this file — confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn sched_group_barrier<const MASK: u32, const SIZE: u32, const SYNC_ID: u32>() {
    // Compile-time check: MASK must fit in 11 bits.
    static_assert_uimm_bits!(MASK, 11);
    unsafe { llvm_sched_group_barrier(MASK, SIZE, SYNC_ID) }
}
370
/// Sleeps for a duration selected by `COUNT` (`llvm.amdgcn.s.sleep`).
/// NOTE(review): unlike the other const-generic wrappers, no range check is
/// applied to `COUNT` here — confirm the intrinsic accepts the full `u32`
/// range.
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_sleep<const COUNT: u32>() {
    llvm_s_sleep(COUNT)
}
381
/// Halts the wave (`llvm.amdgcn.s.sethalt`); declared diverging. `VALUE` is
/// restricted to 3 bits at compile time.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_sethalt<const VALUE: u32>() -> ! {
    // Compile-time check: VALUE must fit in 3 bits.
    static_assert_uimm_bits!(VALUE, 3);
    llvm_s_sethalt(VALUE)
}
393
/// Returns the current program counter (`llvm.amdgcn.s.getpc`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_getpc() -> i64 {
    llvm_s_getpc()
}
404
/// Masked bit count over the low lanes (`llvm.amdgcn.mbcnt.lo`), starting
/// from `init`.
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn mbcnt_lo(value: u32, init: u32) -> u32 {
    llvm_mbcnt_lo(value, init)
}
/// Masked bit count over the high lanes (`llvm.amdgcn.mbcnt.hi`), starting
/// from `init`.
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn mbcnt_hi(value: u32, init: u32) -> u32 {
    llvm_mbcnt_hi(value, init)
}
425
/// Gathers the per-lane boolean `b` into a lane bitmask
/// (`llvm.amdgcn.ballot`).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn ballot(b: bool) -> u64 {
    llvm_ballot(b)
}
435
/// Returns the bit of `value` corresponding to the current lane
/// (`llvm.amdgcn.inverse.ballot`) — the inverse of [`ballot`].
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn inverse_ballot(value: u64) -> bool {
    llvm_inverse_ballot(value)
}
449
/// Unsigned minimum of `value` across the wave
/// (`llvm.amdgcn.wave.reduce.umin`). `STRATEGY` selects the lowering and
/// must be `0..=2` (checked at compile time).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_umin<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_umin(value, STRATEGY)
}
/// Signed minimum of `value` across the wave
/// (`llvm.amdgcn.wave.reduce.min`). `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_min<const STRATEGY: u32>(value: i32) -> i32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_min(value, STRATEGY)
}

/// Unsigned maximum of `value` across the wave
/// (`llvm.amdgcn.wave.reduce.umax`). `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_umax<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_umax(value, STRATEGY)
}
/// Signed maximum of `value` across the wave
/// (`llvm.amdgcn.wave.reduce.max`). `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_max<const STRATEGY: u32>(value: i32) -> i32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_max(value, STRATEGY)
}

/// Sum of `value` across the wave (`llvm.amdgcn.wave.reduce.add`).
/// `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_add<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_add(value, STRATEGY)
}

/// Bitwise AND of `value` across the wave (`llvm.amdgcn.wave.reduce.and`).
/// `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_and<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_and(value, STRATEGY)
}
/// Bitwise OR of `value` across the wave (`llvm.amdgcn.wave.reduce.or`).
/// `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_or<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_or(value, STRATEGY)
}
/// Bitwise XOR of `value` across the wave (`llvm.amdgcn.wave.reduce.xor`).
/// `STRATEGY` must be `0..=2`.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_reduce_xor<const STRATEGY: u32>(value: u32) -> u32 {
    static_assert!(STRATEGY <= 2);
    llvm_wave_reduce_xor(value, STRATEGY)
}
581
/// Reads `value` from the first active lane of the wave
/// (`llvm.amdgcn.readfirstlane.i32`).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn readfirstlane_u32(value: u32) -> u32 {
    llvm_readfirstlane_u32(value)
}
/// 64-bit variant of [`readfirstlane_u32`]
/// (`llvm.amdgcn.readfirstlane.i64`).
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn readfirstlane_u64(value: u64) -> u64 {
    llvm_readfirstlane_u64(value)
}
/// Reads `value` from lane `lane` (`llvm.amdgcn.readlane.i32`).
///
/// # Safety
///
/// NOTE(review): the requirements on `lane` (presumably a valid lane index
/// below the wavefront size) are not visible in this file — confirm against
/// the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn readlane_u32(value: u32, lane: u32) -> u32 {
    unsafe { llvm_readlane_u32(value, lane) }
}
/// 64-bit variant of [`readlane_u32`] (`llvm.amdgcn.readlane.i64`).
///
/// # Safety
///
/// NOTE(review): same caller obligations as [`readlane_u32`] — confirm
/// against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn readlane_u64(value: u64, lane: u32) -> u64 {
    unsafe { llvm_readlane_u64(value, lane) }
}
/// Writes `value` into lane `lane`, other lanes receiving `default`
/// (`llvm.amdgcn.writelane.i32`).
///
/// # Safety
///
/// NOTE(review): the requirements on `lane` are not visible in this file —
/// confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn writelane_u32(value: u32, lane: u32, default: u32) -> u32 {
    unsafe { llvm_writelane_u32(value, lane, default) }
}
/// 64-bit variant of [`writelane_u32`] (`llvm.amdgcn.writelane.i64`).
///
/// # Safety
///
/// NOTE(review): same caller obligations as [`writelane_u32`] — confirm
/// against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn writelane_u64(value: u64, lane: u32, default: u64) -> u64 {
    unsafe { llvm_writelane_u64(value, lane, default) }
}
654
/// Terminates the program (`llvm.amdgcn.endpgm`); never returns.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn endpgm() -> ! {
    llvm_endpgm()
}
665
/// DPP (data-parallel primitives) lane shuffle (`llvm.amdgcn.update.dpp.i32`)
/// combining `old` and `src` according to `DPP_CTRL`, `ROW_MASK`,
/// `BANK_MASK`, and `BOUND_CONTROL`.
///
/// # Safety
///
/// NOTE(review): the valid encodings of `DPP_CTRL` and the mask semantics are
/// not visible in this file — confirm against the LLVM AMDGPU intrinsic
/// documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn update_dpp<
    const DPP_CTRL: u32,
    const ROW_MASK: u32,
    const BANK_MASK: u32,
    const BOUND_CONTROL: bool,
>(
    old: u32,
    src: u32,
) -> u32 {
    unsafe { llvm_update_dpp(old, src, DPP_CTRL, ROW_MASK, BANK_MASK, BOUND_CONTROL) }
}
691
/// Returns the value of the real-time counter (`llvm.amdgcn.s.memrealtime`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_memrealtime() -> u64 {
    llvm_s_memrealtime()
}
701
/// Forward (push) lane permute through data-share hardware
/// (`llvm.amdgcn.ds.permute`).
///
/// # Safety
///
/// NOTE(review): the requirements on `lane` are not visible in this file —
/// confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn ds_permute(lane: u32, value: u32) -> u32 {
    unsafe { llvm_ds_permute(lane, value) }
}
/// Backward (pull) lane permute through data-share hardware
/// (`llvm.amdgcn.ds.bpermute`).
///
/// # Safety
///
/// NOTE(review): the requirements on `lane` are not visible in this file —
/// confirm against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn ds_bpermute(lane: u32, value: u32) -> u32 {
    unsafe { llvm_ds_bpermute(lane, value) }
}
/// Byte permute of `src0`/`src1` driven by `selector` (`llvm.amdgcn.perm`).
///
/// # Safety
///
/// NOTE(review): the encoding of `selector` is not visible in this file —
/// confirm against the LLVM AMDGPU intrinsic documentation.
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn perm(src0: u32, src1: u32, selector: u32) -> u32 {
    unsafe { llvm_perm(src0, src1, selector) }
}
735
/// 16-lane permute (`llvm.amdgcn.permlane16.i32`) with fetch-inactive (`FI`)
/// and `BOUND_CONTROL` flags supplied as const generics.
///
/// # Safety
///
/// NOTE(review): the roles of `old`/`src0..src2` and any uniformity
/// requirements are not visible in this file — confirm against the LLVM
/// AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlane16_u32<const FI: bool, const BOUND_CONTROL: bool>(
    old: u32,
    src0: u32,
    src1: u32,
    src2: u32,
) -> u32 {
    unsafe { llvm_permlane16_u32(old, src0, src1, src2, FI, BOUND_CONTROL) }
}

/// Crossed 16-lane permute (`llvm.amdgcn.permlanex16.i32`); same parameters
/// as [`permlane16_u32`].
///
/// # Safety
///
/// NOTE(review): same caller obligations as [`permlane16_u32`] — confirm
/// against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlanex16_u32<const FI: bool, const BOUND_CONTROL: bool>(
    old: u32,
    src0: u32,
    src1: u32,
    src2: u32,
) -> u32 {
    unsafe { llvm_permlanex16_u32(old, src0, src1, src2, FI, BOUND_CONTROL) }
}
771
/// Returns the id of the wave within its workgroup
/// (`llvm.amdgcn.s.get.waveid.in.workgroup`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn s_get_waveid_in_workgroup() -> u32 {
    llvm_s_get_waveid_in_workgroup()
}
778
/// 64-lane permute (`llvm.amdgcn.permlane64.i32`).
///
/// # Safety
///
/// NOTE(review): the permutation performed and any execution requirements are
/// not visible in this file — confirm against the LLVM AMDGPU intrinsic
/// documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlane64_u32(value: u32) -> u32 {
    unsafe { llvm_permlane64_u32(value) }
}
790
/// Variable-index 16-lane permute (`llvm.amdgcn.permlane16.var`); unlike
/// [`permlane16_u32`] the lane selector `src1` is a runtime value.
///
/// # Safety
///
/// NOTE(review): the roles of `old`/`src0`/`src1` and any uniformity
/// requirements are not visible in this file — confirm against the LLVM
/// AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlane16_var<const FI: bool, const BOUND_CONTROL: bool>(
    old: u32,
    src0: u32,
    src1: u32,
) -> u32 {
    unsafe { llvm_permlane16_var(old, src0, src1, FI, BOUND_CONTROL) }
}

/// Variable-index crossed 16-lane permute (`llvm.amdgcn.permlanex16.var`);
/// same parameters as [`permlane16_var`].
///
/// # Safety
///
/// NOTE(review): same caller obligations as [`permlane16_var`] — confirm
/// against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlanex16_var<const FI: bool, const BOUND_CONTROL: bool>(
    old: u32,
    src0: u32,
    src1: u32,
) -> u32 {
    unsafe { llvm_permlanex16_var(old, src0, src1, FI, BOUND_CONTROL) }
}
822
/// Returns the id of the current wave (`llvm.amdgcn.wave.id`).
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub fn wave_id() -> u32 {
    llvm_wave_id()
}
829
/// 16-lane swap (`llvm.amdgcn.permlane16.swap`) returning both result
/// registers as a pair.
///
/// # Safety
///
/// NOTE(review): the roles of `vdst_old`/`vsrc_src0` and the meaning of the
/// returned pair are not visible in this file — confirm against the LLVM
/// AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlane16_swap<const FI: bool, const BOUND_CONTROL: bool>(
    vdst_old: u32,
    vsrc_src0: u32,
) -> (u32, u32) {
    unsafe { llvm_permlane16_swap(vdst_old, vsrc_src0, FI, BOUND_CONTROL) }
}

/// 32-lane swap (`llvm.amdgcn.permlane32.swap`); same parameters as
/// [`permlane16_swap`].
///
/// # Safety
///
/// NOTE(review): same caller obligations as [`permlane16_swap`] — confirm
/// against the LLVM AMDGPU intrinsic documentation.
#[doc = include_str!("intrinsic_is_convergent.md")]
#[inline]
#[unstable(feature = "stdarch_amdgpu", issue = "149988")]
pub unsafe fn permlane32_swap<const FI: bool, const BOUND_CONTROL: bool>(
    vdst_old: u32,
    vsrc_src0: u32,
) -> (u32, u32) {
    unsafe { llvm_permlane32_swap(vdst_old, vsrc_src0, FI, BOUND_CONTROL) }
}
865
#[cfg(test)]
mod tests {
    // One exported probe per public wrapper. These functions contain no
    // assertions; each `#[unsafe(no_mangle)]` pins a stable symbol name, so
    // they appear to exist for inspecting the code generated for each
    // intrinsic wrapper (NOTE(review): confirm how the build invokes these —
    // the harness is not visible in this file).
    use super::*;

    #[unsafe(no_mangle)]
    fn test_workitem_id_x() -> u32 {
        workitem_id_x()
    }
    #[unsafe(no_mangle)]
    fn test_workitem_id_y() -> u32 {
        workitem_id_y()
    }
    #[unsafe(no_mangle)]
    fn test_workitem_id_z() -> u32 {
        workitem_id_z()
    }

    #[unsafe(no_mangle)]
    fn test_workgroup_id_x() -> u32 {
        workgroup_id_x()
    }
    #[unsafe(no_mangle)]
    fn test_workgroup_id_y() -> u32 {
        workgroup_id_y()
    }
    #[unsafe(no_mangle)]
    fn test_workgroup_id_z() -> u32 {
        workgroup_id_z()
    }

    #[unsafe(no_mangle)]
    fn test_groupstaticsize() -> u32 {
        groupstaticsize()
    }
    #[unsafe(no_mangle)]
    fn test_dispatch_id() -> u64 {
        dispatch_id()
    }

    #[unsafe(no_mangle)]
    fn test_wavefrontsize() -> u32 {
        wavefrontsize()
    }

    #[unsafe(no_mangle)]
    fn test_s_barrier() {
        s_barrier()
    }

    // Barrier-type probes use -1; presumably a "default/all" selector —
    // confirm against the LLVM AMDGPU docs.
    #[unsafe(no_mangle)]
    fn test_s_barrier_signal() {
        unsafe { s_barrier_signal::<-1>() }
    }

    #[unsafe(no_mangle)]
    fn test_s_barrier_signal_isfirst() -> bool {
        unsafe { s_barrier_signal_isfirst::<-1>() }
    }

    #[unsafe(no_mangle)]
    fn test_s_barrier_wait() {
        unsafe { s_barrier_wait::<-1>() }
    }

    #[unsafe(no_mangle)]
    fn test_s_get_barrier_state() -> u32 {
        unsafe { s_get_barrier_state::<-1>() }
    }

    #[unsafe(no_mangle)]
    fn test_wave_barrier() {
        wave_barrier()
    }

    #[unsafe(no_mangle)]
    fn test_sched_barrier() {
        unsafe { sched_barrier::<1>() }
    }

    #[unsafe(no_mangle)]
    fn test_sched_group_barrier() {
        unsafe { sched_group_barrier::<1, 1, 0>() }
    }

    #[unsafe(no_mangle)]
    fn test_s_sleep() {
        s_sleep::<1>()
    }

    #[unsafe(no_mangle)]
    fn test_s_sethalt() -> ! {
        s_sethalt::<1>()
    }

    #[unsafe(no_mangle)]
    fn test_s_getpc() -> i64 {
        s_getpc()
    }

    #[unsafe(no_mangle)]
    fn test_mbcnt_lo(value: u32, init: u32) -> u32 {
        mbcnt_lo(value, init)
    }
    #[unsafe(no_mangle)]
    fn test_mbcnt_hi(value: u32, init: u32) -> u32 {
        mbcnt_hi(value, init)
    }

    #[unsafe(no_mangle)]
    fn test_ballot(b: bool) -> u64 {
        ballot(b)
    }

    #[unsafe(no_mangle)]
    fn test_inverse_ballot(value: u64) -> bool {
        inverse_ballot(value)
    }

    // Wave reductions are probed with STRATEGY = 0 only.
    #[unsafe(no_mangle)]
    fn test_wave_reduce_umin(value: u32) -> u32 {
        wave_reduce_umin::<0>(value)
    }
    #[unsafe(no_mangle)]
    fn test_wave_reduce_min(value: i32) -> i32 {
        wave_reduce_min::<0>(value)
    }

    #[unsafe(no_mangle)]
    fn test_wave_reduce_umax(value: u32) -> u32 {
        wave_reduce_umax::<0>(value)
    }
    #[unsafe(no_mangle)]
    fn test_wave_reduce_max(value: i32) -> i32 {
        wave_reduce_max::<0>(value)
    }

    #[unsafe(no_mangle)]
    fn test_wave_reduce_add(value: u32) -> u32 {
        wave_reduce_add::<0>(value)
    }

    #[unsafe(no_mangle)]
    fn test_wave_reduce_and(value: u32) -> u32 {
        wave_reduce_and::<0>(value)
    }
    #[unsafe(no_mangle)]
    fn test_wave_reduce_or(value: u32) -> u32 {
        wave_reduce_or::<0>(value)
    }
    #[unsafe(no_mangle)]
    fn test_wave_reduce_xor(value: u32) -> u32 {
        wave_reduce_xor::<0>(value)
    }

    #[unsafe(no_mangle)]
    fn test_readfirstlane_u32(value: u32) -> u32 {
        readfirstlane_u32(value)
    }
    #[unsafe(no_mangle)]
    fn test_readfirstlane_u64(value: u64) -> u64 {
        readfirstlane_u64(value)
    }
    #[unsafe(no_mangle)]
    fn test_readlane_u32(value: u32, lane: u32) -> u32 {
        unsafe { readlane_u32(value, lane) }
    }
    #[unsafe(no_mangle)]
    fn test_readlane_u64(value: u64, lane: u32) -> u64 {
        unsafe { readlane_u64(value, lane) }
    }
    #[unsafe(no_mangle)]
    fn test_writelane_u32(value: u32, lane: u32, default: u32) -> u32 {
        unsafe { writelane_u32(value, lane, default) }
    }
    #[unsafe(no_mangle)]
    fn test_writelane_u64(value: u64, lane: u32, default: u64) -> u64 {
        unsafe { writelane_u64(value, lane, default) }
    }

    #[unsafe(no_mangle)]
    fn test_endpgm() -> ! {
        endpgm()
    }

    #[unsafe(no_mangle)]
    fn test_update_dpp(old: u32, src: u32) -> u32 {
        unsafe { update_dpp::<0, 0, 0, true>(old, src) }
    }

    #[unsafe(no_mangle)]
    fn test_s_memrealtime() -> u64 {
        s_memrealtime()
    }

    #[unsafe(no_mangle)]
    fn test_ds_permute(lane: u32, value: u32) -> u32 {
        unsafe { ds_permute(lane, value) }
    }
    #[unsafe(no_mangle)]
    fn test_ds_bpermute(lane: u32, value: u32) -> u32 {
        unsafe { ds_bpermute(lane, value) }
    }
    #[unsafe(no_mangle)]
    fn test_perm(src0: u32, src1: u32, selector: u32) -> u32 {
        unsafe { perm(src0, src1, selector) }
    }

    #[unsafe(no_mangle)]
    fn test_permlane16_u32(old: u32, src0: u32, src1: u32, src2: u32) -> u32 {
        unsafe { permlane16_u32::<false, true>(old, src0, src1, src2) }
    }

    #[unsafe(no_mangle)]
    fn test_permlanex16_u32(old: u32, src0: u32, src1: u32, src2: u32) -> u32 {
        unsafe { permlanex16_u32::<false, true>(old, src0, src1, src2) }
    }

    #[unsafe(no_mangle)]
    fn test_s_get_waveid_in_workgroup() -> u32 {
        s_get_waveid_in_workgroup()
    }

    #[unsafe(no_mangle)]
    fn test_permlane64_u32(value: u32) -> u32 {
        unsafe { permlane64_u32(value) }
    }

    #[unsafe(no_mangle)]
    fn test_permlane16_var(old: u32, src0: u32, src1: u32) -> u32 {
        unsafe { permlane16_var::<false, true>(old, src0, src1) }
    }

    #[unsafe(no_mangle)]
    fn test_permlanex16_var(old: u32, src0: u32, src1: u32) -> u32 {
        unsafe { permlanex16_var::<false, true>(old, src0, src1) }
    }

    #[unsafe(no_mangle)]
    fn test_wave_id() -> u32 {
        wave_id()
    }

    #[unsafe(no_mangle)]
    fn test_permlane16_swap(vdst_old: u32, vsrc_src0: u32) -> (u32, u32) {
        unsafe { permlane16_swap::<false, true>(vdst_old, vsrc_src0) }
    }

    #[unsafe(no_mangle)]
    fn test_permlane32_swap(vdst_old: u32, vsrc_src0: u32) -> (u32, u32) {
        unsafe { permlane32_swap::<false, true>(vdst_old, vsrc_src0) }
    }
}