1use std::cmp::Ordering;
2use std::ffi::c_uint;
3use std::{assert_matches, iter, ptr};
4
5use rustc_abi::{
6 AddressSpace, Align, BackendRepr, Float, HasDataLayout, Integer, NumScalableVectors, Primitive,
7 Size, WrappingRange,
8};
9use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
10use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
11use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
12use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
13use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
14use rustc_codegen_ssa::traits::*;
15use rustc_hir as hir;
16use rustc_hir::def_id::LOCAL_CRATE;
17use rustc_hir::find_attr;
18use rustc_middle::mir::BinOp;
19use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
20use rustc_middle::ty::offload_meta::OffloadMetadata;
21use rustc_middle::ty::{
22 self, GenericArgsRef, Instance, SimdAlign, Ty, TyCtxt, TypingEnv, Unnormalized,
23};
24use rustc_middle::{bug, span_bug};
25use rustc_session::config::CrateType;
26use rustc_session::lint::builtin::DEPRECATED_LLVM_INTRINSIC;
27use rustc_span::{Span, Symbol, sym};
28use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
29use rustc_target::callconv::PassMode;
30use rustc_target::spec::{Arch, Os};
31use tracing::debug;
32
33use crate::abi::FnAbiLlvmExt;
34use crate::builder::Builder;
35use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
36use crate::builder::gpu_offload::{
37 OffloadKernelDims, gen_call_handling, gen_define_handling, register_offload,
38};
39use crate::context::CodegenCx;
40use crate::declare::declare_raw_fn;
41use crate::errors::{
42 AutoDiffWithoutEnable, AutoDiffWithoutLto, IntrinsicSignatureMismatch, IntrinsicWrongArch,
43 OffloadWithoutEnable, OffloadWithoutFatLTO, UnknownIntrinsic,
44};
45use crate::llvm::{self, Type, Value};
46use crate::type_of::LayoutLlvmExt;
47use crate::va_arg::emit_va_arg;
48
49fn call_simple_intrinsic<'ll, 'tcx>(
50 bx: &mut Builder<'_, 'll, 'tcx>,
51 name: Symbol,
52 args: &[OperandRef<'tcx, &'ll Value>],
53) -> Option<&'ll Value> {
54 let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
55 sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
56 sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
57 sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
58 sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),
59
60 sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
61 sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
62 sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
63 sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),
64
65 sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
66 sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
67 sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
68 sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),
69
70 sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
71 sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
72 sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
73 sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),
74
75 sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
76 sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
77 sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
78 sym::powf128 => ("llvm.pow", &[bx.type_f128()]),
79
80 sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
81 sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
82 sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
83 sym::expf128 => ("llvm.exp", &[bx.type_f128()]),
84
85 sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
86 sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
87 sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
88 sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),
89
90 sym::logf16 => ("llvm.log", &[bx.type_f16()]),
91 sym::logf32 => ("llvm.log", &[bx.type_f32()]),
92 sym::logf64 => ("llvm.log", &[bx.type_f64()]),
93 sym::logf128 => ("llvm.log", &[bx.type_f128()]),
94
95 sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
96 sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
97 sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
98 sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),
99
100 sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
101 sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
102 sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
103 sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),
104
105 sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
106 sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
107 sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
108 sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),
109
110 sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
111 sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
112 sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
113 sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),
114
115 sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
130 sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
131 sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
132 sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),
133
134 sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
135 sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
136 sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
137 sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),
138
139 sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
140 sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
141 sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
142 sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),
143
144 sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
145 sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
146 sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
147 sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),
148
149 sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
154 sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
155 sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
156 sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),
157
158 sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
159 sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
160 sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
161 sym::roundf128 => ("llvm.round", &[bx.type_f128()]),
162
163 _ => return None,
164 };
165 Some(bx.call_intrinsic(
166 base_name,
167 type_params,
168 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
169 ))
170}
171
172impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
173 fn codegen_intrinsic_call(
174 &mut self,
175 instance: ty::Instance<'tcx>,
176 args: &[OperandRef<'tcx, &'ll Value>],
177 result: PlaceRef<'tcx, &'ll Value>,
178 span: Span,
179 ) -> Result<(), ty::Instance<'tcx>> {
180 let tcx = self.tcx;
181 let llvm_version = crate::llvm_util::get_version();
182
183 let name = tcx.item_name(instance.def_id());
184 let fn_args = instance.args;
185
186 let simple = call_simple_intrinsic(self, name, args);
187 let llval = match name {
188 _ if simple.is_some() => simple.unwrap(),
189 sym::minimum_number_nsz_f16
190 | sym::minimum_number_nsz_f32
191 | sym::minimum_number_nsz_f64
192 | sym::minimum_number_nsz_f128
193 | sym::maximum_number_nsz_f16
194 | sym::maximum_number_nsz_f32
195 | sym::maximum_number_nsz_f64
196 | sym::maximum_number_nsz_f128
197 if llvm_version >= (22, 0, 0) =>
199 {
200 let intrinsic_name = if name.as_str().starts_with("min") {
201 "llvm.minimumnum"
202 } else {
203 "llvm.maximumnum"
204 };
205 let call = self.call_intrinsic(
206 intrinsic_name,
207 &[args[0].layout.immediate_llvm_type(self.cx)],
208 &[args[0].immediate(), args[1].immediate()],
209 );
210 unsafe { llvm::LLVMRustSetNoSignedZeros(call) };
213 call
214 }
215 sym::ptr_mask => {
216 let ptr = args[0].immediate();
217 self.call_intrinsic(
218 "llvm.ptrmask",
219 &[self.val_ty(ptr), self.type_isize()],
220 &[ptr, args[1].immediate()],
221 )
222 }
223 sym::autodiff => {
224 codegen_autodiff(self, tcx, instance, args, result);
225 return Ok(());
226 }
227 sym::offload => {
228 if tcx.sess.opts.unstable_opts.offload.is_empty() {
229 let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutEnable);
230 }
231
232 if tcx.sess.lto() != rustc_session::config::Lto::Fat {
233 let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutFatLTO);
234 }
235
236 codegen_offload(self, tcx, instance, args);
237 return Ok(());
238 }
239 sym::is_val_statically_known => {
240 if let OperandValue::Immediate(imm) = args[0].val {
241 self.call_intrinsic(
242 "llvm.is.constant",
243 &[args[0].layout.immediate_llvm_type(self.cx)],
244 &[imm],
245 )
246 } else {
247 self.const_bool(false)
248 }
249 }
250 sym::select_unpredictable => {
251 let cond = args[0].immediate();
252 match (&args[1].layout, &args[2].layout) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(args[1].layout, args[2].layout);
253 let select = |bx: &mut Self, true_val, false_val| {
254 let result = bx.select(cond, true_val, false_val);
255 bx.set_unpredictable(&result);
256 result
257 };
258 match (args[1].val, args[2].val) {
259 (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
260 if !true_val.llextra.is_none() {
::core::panicking::panic("assertion failed: true_val.llextra.is_none()")
};assert!(true_val.llextra.is_none());
261 if !false_val.llextra.is_none() {
::core::panicking::panic("assertion failed: false_val.llextra.is_none()")
};assert!(false_val.llextra.is_none());
262 match (&true_val.align, &false_val.align) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(true_val.align, false_val.align);
263 let ptr = select(self, true_val.llval, false_val.llval);
264 let selected =
265 OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
266 selected.store(self, result);
267 return Ok(());
268 }
269 (OperandValue::Immediate(_), OperandValue::Immediate(_))
270 | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
271 let true_val = args[1].immediate_or_packed_pair(self);
272 let false_val = args[2].immediate_or_packed_pair(self);
273 select(self, true_val, false_val)
274 }
275 (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
276 _ => ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("Incompatible OperandValue for select_unpredictable"))span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
277 }
278 }
279 sym::catch_unwind => {
280 catch_unwind_intrinsic(
281 self,
282 args[0].immediate(),
283 args[1].immediate(),
284 args[2].immediate(),
285 result,
286 );
287 return Ok(());
288 }
289 sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
290 sym::va_arg => {
291 let BackendRepr::Scalar(scalar) = result.layout.backend_repr else {
292 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support non-scalar types"))bug!("the va_arg intrinsic does not support non-scalar types")
293 };
294
295 match scalar.primitive() {
299 Primitive::Pointer(_) => {
300 }
302 Primitive::Int(Integer::I128, _) => {
303 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `i128`/`u128`"))bug!("the va_arg intrinsic does not support `i128`/`u128`")
306 }
307 Primitive::Int(..) => {
308 let int_width = self.cx().size_of(result.layout.ty).bits();
309 let target_c_int_width = self.cx().sess().target.options.c_int_width;
310 if int_width < u64::from(target_c_int_width) {
311 ::rustc_middle::util::bug::bug_fmt(format_args!("va_arg got i{0} but needs at least c_int (an i{1})",
int_width, target_c_int_width));bug!(
314 "va_arg got i{} but needs at least c_int (an i{})",
315 int_width,
316 target_c_int_width
317 );
318 }
319 }
320 Primitive::Float(Float::F16) => {
321 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f16`"))bug!("the va_arg intrinsic does not support `f16`")
322 }
323 Primitive::Float(Float::F32) => {
324 if self.cx().sess().target.arch != Arch::Avr {
326 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f32` on this target"))bug!("the va_arg intrinsic does not support `f32` on this target")
327 }
328 }
329 Primitive::Float(Float::F64) => {
330 }
332 Primitive::Float(Float::F128) => {
333 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f128`"))bug!("the va_arg intrinsic does not support `f128`")
335 }
336 }
337
338 emit_va_arg(self, args[0], result.layout.ty)
339 }
340
341 sym::volatile_load | sym::unaligned_volatile_load => {
342 let ptr = args[0].immediate();
343 let load = self.volatile_load(result.layout.llvm_type(self), ptr);
344 let align = if name == sym::unaligned_volatile_load {
345 1
346 } else {
347 result.layout.align.bytes() as u32
348 };
349 unsafe {
350 llvm::LLVMSetAlignment(load, align);
351 }
352 if !result.layout.is_zst() {
353 self.store_to_place(load, result.val);
354 }
355 return Ok(());
356 }
357 sym::volatile_store => {
358 let dst = args[0].deref(self.cx());
359 args[1].val.volatile_store(self, dst);
360 return Ok(());
361 }
362 sym::unaligned_volatile_store => {
363 let dst = args[0].deref(self.cx());
364 args[1].val.unaligned_volatile_store(self, dst);
365 return Ok(());
366 }
367 sym::prefetch_read_data
368 | sym::prefetch_write_data
369 | sym::prefetch_read_instruction
370 | sym::prefetch_write_instruction => {
371 let (rw, cache_type) = match name {
372 sym::prefetch_read_data => (0, 1),
373 sym::prefetch_write_data => (1, 1),
374 sym::prefetch_read_instruction => (0, 0),
375 sym::prefetch_write_instruction => (1, 0),
376 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("impossible case reached"))bug!(),
377 };
378 let ptr = args[0].immediate();
379 let locality = fn_args.const_at(1).to_leaf().to_i32();
380 self.call_intrinsic(
381 "llvm.prefetch",
382 &[self.val_ty(ptr)],
383 &[
384 ptr,
385 self.const_i32(rw),
386 self.const_i32(locality),
387 self.const_i32(cache_type),
388 ],
389 )
390 }
391 sym::carrying_mul_add => {
392 let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);
393
394 let wide_llty = self.type_ix(size.bits() * 2);
395 let args = args.as_array().unwrap();
396 let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));
397
398 let wide = if signed {
399 let prod = self.unchecked_smul(a, b);
400 let acc = self.unchecked_sadd(prod, c);
401 self.unchecked_sadd(acc, d)
402 } else {
403 let prod = self.unchecked_umul(a, b);
404 let acc = self.unchecked_uadd(prod, c);
405 self.unchecked_uadd(acc, d)
406 };
407
408 let narrow_llty = self.type_ix(size.bits());
409 let low = self.trunc(wide, narrow_llty);
410 let bits_const = self.const_uint(wide_llty, size.bits());
411 let high = self.lshr(wide, bits_const);
413 let high = self.trunc(high, narrow_llty);
415
416 let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
417 let pair = self.const_poison(pair_llty);
418 let pair = self.insert_value(pair, low, 0);
419 let pair = self.insert_value(pair, high, 1);
420 pair
421 }
422
423 sym::carryless_mul if llvm_version >= (22, 0, 0) => {
425 let ty = args[0].layout.ty;
426 if !ty.is_integral() {
427 tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
428 span,
429 name,
430 ty,
431 });
432 return Ok(());
433 }
434 let (size, _) = ty.int_size_and_signed(self.tcx);
435 let width = size.bits();
436 let llty = self.type_ix(width);
437
438 let lhs = args[0].immediate();
439 let rhs = args[1].immediate();
440 self.call_intrinsic("llvm.clmul", &[llty], &[lhs, rhs])
441 }
442
443 sym::ctlz
444 | sym::ctlz_nonzero
445 | sym::cttz
446 | sym::cttz_nonzero
447 | sym::ctpop
448 | sym::bswap
449 | sym::bitreverse
450 | sym::saturating_add
451 | sym::saturating_sub
452 | sym::unchecked_funnel_shl
453 | sym::unchecked_funnel_shr => {
454 let ty = args[0].layout.ty;
455 if !ty.is_integral() {
456 tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
457 span,
458 name,
459 ty,
460 });
461 return Ok(());
462 }
463 let (size, signed) = ty.int_size_and_signed(self.tcx);
464 let width = size.bits();
465 let llty = self.type_ix(width);
466 match name {
467 sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
468 let y =
469 self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
470 let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
471 "llvm.ctlz"
472 } else {
473 "llvm.cttz"
474 };
475 let ret =
476 self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
477 self.intcast(ret, result.layout.llvm_type(self), false)
478 }
479 sym::ctpop => {
480 let ret =
481 self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
482 self.intcast(ret, result.layout.llvm_type(self), false)
483 }
484 sym::bswap => {
485 if width == 8 {
486 args[0].immediate() } else {
488 self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
489 }
490 }
491 sym::bitreverse => {
492 self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
493 }
494 sym::unchecked_funnel_shl | sym::unchecked_funnel_shr => {
495 let is_left = name == sym::unchecked_funnel_shl;
496 let lhs = args[0].immediate();
497 let rhs = args[1].immediate();
498 let raw_shift = args[2].immediate();
499 let llvm_name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.fsh{0}",
if is_left { 'l' } else { 'r' }))
})format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });
500
501 let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false);
504
505 self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift])
506 }
507 sym::saturating_add | sym::saturating_sub => {
508 let is_add = name == sym::saturating_add;
509 let lhs = args[0].immediate();
510 let rhs = args[1].immediate();
511 let llvm_name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.{0}{1}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" }))
})format!(
512 "llvm.{}{}.sat",
513 if signed { 's' } else { 'u' },
514 if is_add { "add" } else { "sub" },
515 );
516 self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
517 }
518 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("impossible case reached"))bug!(),
519 }
520 }
521
522 sym::fabs => {
523 let ty = args[0].layout.ty;
524 let ty::Float(f) = ty.kind() else {
525 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("the `fabs` intrinsic requires a floating-point argument, got {0:?}",
ty));span_bug!(span, "the `fabs` intrinsic requires a floating-point argument, got {:?}", ty);
526 };
527 let llty = self.type_float_from_ty(*f);
528 let llvm_name = "llvm.fabs";
529 self.call_intrinsic(
530 llvm_name,
531 &[llty],
532 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
533 )
534 }
535
536 sym::raw_eq => {
537 use BackendRepr::*;
538 let tp_ty = fn_args.type_at(0);
539 let layout = self.layout_of(tp_ty).layout;
540 let use_integer_compare = match layout.backend_repr() {
541 Scalar(_) | ScalarPair(_, _) => true,
542 SimdVector { .. } => false,
543 SimdScalableVector { .. } => {
544 tcx.dcx().emit_err(InvalidMonomorphization::NonScalableType {
545 span,
546 name: sym::raw_eq,
547 ty: tp_ty,
548 });
549 return Ok(());
550 }
551 Memory { .. } => {
552 layout.size() <= self.data_layout().pointer_size() * 2
556 }
557 };
558
559 let a = args[0].immediate();
560 let b = args[1].immediate();
561 if layout.size().bytes() == 0 {
562 self.const_bool(true)
563 } else if use_integer_compare {
564 let integer_ty = self.type_ix(layout.size().bits());
565 let a_val = self.load(integer_ty, a, layout.align().abi);
566 let b_val = self.load(integer_ty, b, layout.align().abi);
567 self.icmp(IntPredicate::IntEQ, a_val, b_val)
568 } else {
569 let n = self.const_usize(layout.size().bytes());
570 let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
571 self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
572 }
573 }
574
575 sym::compare_bytes => {
576 let cmp = self.call_intrinsic(
578 "memcmp",
579 &[],
580 &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
581 );
582 self.sext(cmp, self.type_ix(32))
584 }
585
586 sym::black_box => {
587 args[0].val.store(self, result);
588 let result_val_span = [result.val.llval];
589 let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
599 ("~{memory}", &[])
600 } else {
601 ("r,~{memory}", &result_val_span)
602 };
603 crate::asm::inline_asm_call(
604 self,
605 "",
606 constraint,
607 inputs,
608 self.type_void(),
609 &[],
610 true,
611 false,
612 llvm::AsmDialect::Att,
613 &[span],
614 false,
615 None,
616 None,
617 )
618 .unwrap_or_else(|| ::rustc_middle::util::bug::bug_fmt(format_args!("failed to generate inline asm call for `black_box`"))bug!("failed to generate inline asm call for `black_box`"));
619
620 return Ok(());
622 }
623
624 sym::gpu_launch_sized_workgroup_mem => {
625 let name = if llvm_version < (23, 0, 0) && tcx.sess.target.arch == Arch::Nvptx64 {
633 "gpu_launch_sized_workgroup_mem"
637 } else {
638 ""
639 };
640 let global = self.declare_global_in_addrspace(
641 name,
642 self.type_array(self.type_i8(), 0),
643 AddressSpace::GPU_WORKGROUP,
644 );
645 let ty::RawPtr(inner_ty, _) = result.layout.ty.kind() else { ::core::panicking::panic("internal error: entered unreachable code")unreachable!() };
646 let alignment = self.align_of(*inner_ty).bytes() as u32;
651 unsafe {
652 if tcx.sess.target.arch == Arch::Nvptx64 {
654 if alignment > llvm::LLVMGetAlignment(global) {
655 llvm::LLVMSetAlignment(global, alignment);
656 }
657 } else {
658 llvm::LLVMSetAlignment(global, alignment);
659 }
660 }
661 self.cx().const_pointercast(global, self.type_ptr())
662 }
663
664 sym::amdgpu_dispatch_ptr => {
665 let val = self.call_intrinsic("llvm.amdgcn.dispatch.ptr", &[], &[]);
666 self.pointercast(val, self.type_ptr())
668 }
669
670 sym::sve_tuple_create2 => {
671 {
match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
672 self.layout_of(fn_args.type_at(0)).backend_repr,
673 BackendRepr::SimdScalableVector {
674 number_of_vectors: NumScalableVectors(1),
675 ..
676 }
677 );
678 let tuple_ty = self.layout_of(fn_args.type_at(1));
679 {
match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
680 tuple_ty.backend_repr,
681 BackendRepr::SimdScalableVector {
682 number_of_vectors: NumScalableVectors(2),
683 ..
684 }
685 );
686 let ret = self.const_poison(self.backend_type(tuple_ty));
687 let ret = self.insert_value(ret, args[0].immediate(), 0);
688 self.insert_value(ret, args[1].immediate(), 1)
689 }
690
691 sym::sve_tuple_create3 => {
692 {
match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
693 self.layout_of(fn_args.type_at(0)).backend_repr,
694 BackendRepr::SimdScalableVector {
695 number_of_vectors: NumScalableVectors(1),
696 ..
697 }
698 );
699 let tuple_ty = self.layout_of(fn_args.type_at(1));
700 {
match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(3), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(3), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
701 tuple_ty.backend_repr,
702 BackendRepr::SimdScalableVector {
703 number_of_vectors: NumScalableVectors(3),
704 ..
705 }
706 );
707 let ret = self.const_poison(self.backend_type(tuple_ty));
708 let ret = self.insert_value(ret, args[0].immediate(), 0);
709 let ret = self.insert_value(ret, args[1].immediate(), 1);
710 self.insert_value(ret, args[2].immediate(), 2)
711 }
712
713 sym::sve_tuple_create4 => {
714 {
match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
715 self.layout_of(fn_args.type_at(0)).backend_repr,
716 BackendRepr::SimdScalableVector {
717 number_of_vectors: NumScalableVectors(1),
718 ..
719 }
720 );
721 let tuple_ty = self.layout_of(fn_args.type_at(1));
722 {
match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(4), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(4), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
723 tuple_ty.backend_repr,
724 BackendRepr::SimdScalableVector {
725 number_of_vectors: NumScalableVectors(4),
726 ..
727 }
728 );
729 let ret = self.const_poison(self.backend_type(tuple_ty));
730 let ret = self.insert_value(ret, args[0].immediate(), 0);
731 let ret = self.insert_value(ret, args[1].immediate(), 1);
732 let ret = self.insert_value(ret, args[2].immediate(), 2);
733 self.insert_value(ret, args[3].immediate(), 3)
734 }
735
736 sym::sve_tuple_get => {
737 {
match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
.. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
738 self.layout_of(fn_args.type_at(0)).backend_repr,
739 BackendRepr::SimdScalableVector {
740 number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
741 ..
742 }
743 );
744 {
match self.layout_of(fn_args.type_at(1)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
745 self.layout_of(fn_args.type_at(1)).backend_repr,
746 BackendRepr::SimdScalableVector {
747 number_of_vectors: NumScalableVectors(1),
748 ..
749 }
750 );
751 self.extract_value(
752 args[0].immediate(),
753 fn_args.const_at(2).to_leaf().to_i32() as u64,
754 )
755 }
756
757 sym::sve_tuple_set => {
758 {
match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
.. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
759 self.layout_of(fn_args.type_at(0)).backend_repr,
760 BackendRepr::SimdScalableVector {
761 number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
762 ..
763 }
764 );
765 {
match self.layout_of(fn_args.type_at(1)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
}
};assert_matches!(
766 self.layout_of(fn_args.type_at(1)).backend_repr,
767 BackendRepr::SimdScalableVector {
768 number_of_vectors: NumScalableVectors(1),
769 ..
770 }
771 );
772 self.insert_value(
773 args[0].immediate(),
774 args[1].immediate(),
775 fn_args.const_at(2).to_leaf().to_i32() as u64,
776 )
777 }
778
779 _ if name.as_str().starts_with("simd_") => {
780 let mut loaded_args = Vec::new();
783 for arg in args {
784 loaded_args.push(
785 if arg.layout.ty.is_simd()
790 && let OperandValue::Ref(place) = arg.val
791 {
792 let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
793 let elem_ll_ty = match elem_ty.kind() {
794 ty::Float(f) => self.type_float_from_ty(*f),
795 ty::Int(i) => self.type_int_from_ty(*i),
796 ty::Uint(u) => self.type_uint_from_ty(*u),
797 ty::RawPtr(_, _) => self.type_ptr(),
798 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
799 };
800 let loaded =
801 self.load_from_place(self.type_vector(elem_ll_ty, size), place);
802 OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
803 } else {
804 *arg
805 },
806 );
807 }
808
809 let llret_ty = if result.layout.ty.is_simd()
810 && let BackendRepr::Memory { .. } = result.layout.backend_repr
811 {
812 let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
813 let elem_ll_ty = match elem_ty.kind() {
814 ty::Float(f) => self.type_float_from_ty(*f),
815 ty::Int(i) => self.type_int_from_ty(*i),
816 ty::Uint(u) => self.type_uint_from_ty(*u),
817 ty::RawPtr(_, _) => self.type_ptr(),
818 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
819 };
820 self.type_vector(elem_ll_ty, size)
821 } else {
822 result.layout.llvm_type(self)
823 };
824
825 match generic_simd_intrinsic(
826 self,
827 name,
828 fn_args,
829 &loaded_args,
830 result.layout.ty,
831 llret_ty,
832 span,
833 ) {
834 Ok(llval) => llval,
835 Err(()) => return Ok(()),
838 }
839 }
840
841 _ => {
842 {
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/intrinsic.rs:842",
"rustc_codegen_llvm::intrinsic", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/intrinsic.rs"),
::tracing_core::__macro_support::Option::Some(842u32),
::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::intrinsic"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("unknown intrinsic \'{0}\' -- falling back to default body",
name) as &dyn Value))])
});
} else { ; }
};debug!("unknown intrinsic '{}' -- falling back to default body", name);
843 return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
845 }
846 };
847
848 if result.layout.ty.is_bool() {
849 let val = self.from_immediate(llval);
850 self.store_to_place(val, result.val);
851 } else if !result.layout.ty.is_unit() {
852 self.store_to_place(llval, result.val);
853 }
854 Ok(())
855 }
856
857 fn codegen_llvm_intrinsic_call(
858 &mut self,
859 instance: ty::Instance<'tcx>,
860 args: &[OperandRef<'tcx, Self::Value>],
861 _is_cleanup: bool,
862 ) -> Self::Value {
863 let tcx = self.tcx();
864
865 let fn_ty = instance.ty(tcx, self.typing_env());
866 let fn_sig = match *fn_ty.kind() {
867 ty::FnDef(def_id, args) => tcx.instantiate_bound_regions_with_erased(
868 tcx.fn_sig(def_id).instantiate(tcx, args).skip_norm_wip(),
869 ),
870 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
871 };
872 if !!fn_sig.c_variadic() {
::core::panicking::panic("assertion failed: !fn_sig.c_variadic()")
};assert!(!fn_sig.c_variadic());
873
874 let ret_layout = self.layout_of(fn_sig.output());
875 let llreturn_ty = if ret_layout.is_zst() {
876 self.type_void()
877 } else {
878 ret_layout.immediate_llvm_type(self)
879 };
880
881 let mut llargument_tys = Vec::with_capacity(fn_sig.inputs().len());
882 for &arg in fn_sig.inputs() {
883 let arg_layout = self.layout_of(arg);
884 if arg_layout.is_zst() {
885 continue;
886 }
887 llargument_tys.push(arg_layout.immediate_llvm_type(self));
888 }
889
890 let fn_ptr = if let Some(&llfn) = self.intrinsic_instances.borrow().get(&instance) {
891 llfn
892 } else {
893 let sym = tcx.symbol_name(instance).name;
894
895 let llfn = if let Some(llfn) = self.get_declared_value(sym) {
896 llfn
897 } else {
898 intrinsic_fn(self, sym, llreturn_ty, llargument_tys, instance)
899 };
900
901 self.intrinsic_instances.borrow_mut().insert(instance, llfn);
902
903 llfn
904 };
905 let fn_ty = self.get_type_of_global(fn_ptr);
906
907 let mut llargs = ::alloc::vec::Vec::new()vec![];
908
909 for arg in args {
910 match arg.val {
911 OperandValue::ZeroSized => {}
912 OperandValue::Immediate(a) => llargs.push(a),
913 OperandValue::Pair(a, b) => {
914 llargs.push(a);
915 llargs.push(b);
916 }
917 OperandValue::Ref(op_place_val) => {
918 let mut llval = op_place_val.llval;
919 llval = self.load(self.backend_type(arg.layout), llval, op_place_val.align);
925 if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
926 if scalar.is_bool() {
927 self.range_metadata(llval, WrappingRange { start: 0, end: 1 });
928 }
929 llval = self.to_immediate_scalar(llval, scalar);
931 }
932 llargs.push(llval);
933 }
934 }
935 }
936
937 {
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/intrinsic.rs:937",
"rustc_codegen_llvm::intrinsic", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/intrinsic.rs"),
::tracing_core::__macro_support::Option::Some(937u32),
::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::intrinsic"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("call intrinsic {0:?} with args ({1:?})",
instance, llargs) as &dyn Value))])
});
} else { ; }
};debug!("call intrinsic {:?} with args ({:?})", instance, llargs);
938
939 for (dest_ty, arg) in iter::zip(self.func_params_types(fn_ty), &mut llargs) {
940 let src_ty = self.val_ty(arg);
941 if !can_autocast(self, src_ty, dest_ty) {
{
::core::panicking::panic_fmt(format_args!("Cannot match `{0:?}` (expected) with {1:?} (found) in `{2:?}",
dest_ty, src_ty, fn_ptr));
}
};assert!(
942 can_autocast(self, src_ty, dest_ty),
943 "Cannot match `{dest_ty:?}` (expected) with {src_ty:?} (found) in `{fn_ptr:?}"
944 );
945
946 *arg = autocast(self, arg, src_ty, dest_ty);
947 }
948
949 let llret = unsafe {
950 llvm::LLVMBuildCallWithOperandBundles(
951 self.llbuilder,
952 fn_ty,
953 fn_ptr,
954 llargs.as_ptr(),
955 llargs.len() as c_uint,
956 ptr::dangling(),
957 0,
958 c"".as_ptr(),
959 )
960 };
961
962 let src_ty = self.val_ty(llret);
963 let dest_ty = llreturn_ty;
964 if !can_autocast(self, dest_ty, src_ty) {
{
::core::panicking::panic_fmt(format_args!("Cannot match `{0:?}` (expected) with `{1:?}` (found) in `{2:?}`",
src_ty, dest_ty, fn_ptr));
}
};assert!(
965 can_autocast(self, dest_ty, src_ty),
966 "Cannot match `{src_ty:?}` (expected) with `{dest_ty:?}` (found) in `{fn_ptr:?}`"
967 );
968
969 autocast(self, llret, src_ty, dest_ty)
970 }
971
    /// Aborts execution by emitting `llvm.trap`.
    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }
975
    /// Emits `llvm.assume(val)` to tell the optimizer `val` holds.
    /// Skipped at `-O0`, where the hint would only add IR with no benefit.
    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }
981
982 fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
983 if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
984 self.call_intrinsic(
985 "llvm.expect",
986 &[self.type_i1()],
987 &[cond, self.const_bool(expected)],
988 )
989 } else {
990 cond
991 }
992 }
993
    /// Emits a CFI-checked vtable load via `llvm.type.checked.load` and returns
    /// the loaded function pointer (field 0 of the intrinsic's result pair).
    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &[u8],
    ) -> Self::Value {
        let typeid = self.create_metadata(typeid);
        let typeid = self.get_metadata_value(typeid);
        // NOTE(review): the u64 offset is truncated to i32 here — assumes vtable
        // byte offsets always fit in i32; confirm with the intrinsic's signature.
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }
1010
    /// Emits `llvm.va_start` for the given `va_list` pointer.
    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }
1014
    /// Emits `llvm.va_end` for the given `va_list` pointer.
    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
1018}
1019
1020fn llvm_arch_for(rust_arch: &Arch) -> Option<&'static str> {
1021 Some(match rust_arch {
1022 Arch::AArch64 | Arch::Arm64EC => "aarch64",
1023 Arch::AmdGpu => "amdgcn",
1024 Arch::Arm => "arm",
1025 Arch::Bpf => "bpf",
1026 Arch::Hexagon => "hexagon",
1027 Arch::LoongArch32 | Arch::LoongArch64 => "loongarch",
1028 Arch::Mips | Arch::Mips32r6 | Arch::Mips64 | Arch::Mips64r6 => "mips",
1029 Arch::Nvptx64 => "nvvm",
1030 Arch::PowerPC | Arch::PowerPC64 => "ppc",
1031 Arch::RiscV32 | Arch::RiscV64 => "riscv",
1032 Arch::S390x => "s390",
1033 Arch::SpirV => "spv",
1034 Arch::Wasm32 | Arch::Wasm64 => "wasm",
1035 Arch::X86 | Arch::X86_64 => "x86",
1036 _ => return None, })
1038}
1039
1040fn can_autocast<'ll>(cx: &CodegenCx<'ll, '_>, rust_ty: &'ll Type, llvm_ty: &'ll Type) -> bool {
1041 if rust_ty == llvm_ty {
1042 return true;
1043 }
1044
1045 match cx.type_kind(llvm_ty) {
1046 TypeKind::Struct if cx.type_kind(rust_ty) == TypeKind::Struct => {
1050 let rust_element_tys = cx.struct_element_types(rust_ty);
1051 let llvm_element_tys = cx.struct_element_types(llvm_ty);
1052
1053 if rust_element_tys.len() != llvm_element_tys.len() {
1054 return false;
1055 }
1056
1057 iter::zip(rust_element_tys, llvm_element_tys).all(
1058 |(rust_element_ty, llvm_element_ty)| {
1059 can_autocast(cx, rust_element_ty, llvm_element_ty)
1060 },
1061 )
1062 }
1063 TypeKind::Vector => {
1064 let llvm_element_ty = cx.element_type(llvm_ty);
1065 let element_count = cx.vector_length(llvm_ty) as u64;
1066
1067 if llvm_element_ty == cx.type_bf16() {
1068 rust_ty == cx.type_vector(cx.type_i16(), element_count)
1069 } else if llvm_element_ty == cx.type_i1() {
1070 let int_width = element_count.next_power_of_two().max(8);
1071 rust_ty == cx.type_ix(int_width)
1072 } else {
1073 false
1074 }
1075 }
1076 TypeKind::BFloat => rust_ty == cx.type_i16(),
1077 TypeKind::X86_AMX if cx.type_kind(rust_ty) == TypeKind::Vector => {
1078 let element_ty = cx.element_type(rust_ty);
1079 let element_count = cx.vector_length(rust_ty) as u64;
1080
1081 let element_size_bits = match cx.type_kind(element_ty) {
1082 TypeKind::Half => 16,
1083 TypeKind::Float => 32,
1084 TypeKind::Double => 64,
1085 TypeKind::FP128 => 128,
1086 TypeKind::Integer => cx.int_width(element_ty),
1087 TypeKind::Pointer => cx.int_width(cx.isize_ty),
1088 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("Vector element type `{0:?}` not one of integer, float or pointer",
element_ty))bug!(
1089 "Vector element type `{element_ty:?}` not one of integer, float or pointer"
1090 ),
1091 };
1092
1093 element_size_bits * element_count == 8192
1094 }
1095 _ => false,
1096 }
1097}
1098
1099fn autocast<'ll>(
1100 bx: &mut Builder<'_, 'll, '_>,
1101 val: &'ll Value,
1102 src_ty: &'ll Type,
1103 dest_ty: &'ll Type,
1104) -> &'ll Value {
1105 if src_ty == dest_ty {
1106 return val;
1107 }
1108 match (bx.type_kind(src_ty), bx.type_kind(dest_ty)) {
1109 (TypeKind::Struct, TypeKind::Struct) => {
1111 let mut ret = bx.const_poison(dest_ty);
1112 for (idx, (src_element_ty, dest_element_ty)) in
1113 iter::zip(bx.struct_element_types(src_ty), bx.struct_element_types(dest_ty))
1114 .enumerate()
1115 {
1116 let elt = bx.extract_value(val, idx as u64);
1117 let casted_elt = autocast(bx, elt, src_element_ty, dest_element_ty);
1118 ret = bx.insert_value(ret, casted_elt, idx as u64);
1119 }
1120 ret
1121 }
1122 (TypeKind::Vector, TypeKind::Integer) if bx.element_type(src_ty) == bx.type_i1() => {
1124 let vector_length = bx.vector_length(src_ty) as u64;
1125 let int_width = vector_length.next_power_of_two().max(8);
1126
1127 let val = if vector_length == int_width {
1128 val
1129 } else {
1130 let shuffle_indices = match vector_length {
1132 0 => {
::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
format_args!("zero length vectors are not allowed")));
}unreachable!("zero length vectors are not allowed"),
1133 1 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 1, 1, 1, 1, 1, 1]))vec![0, 1, 1, 1, 1, 1, 1, 1],
1134 2 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 2, 2, 2, 2, 2, 2]))vec![0, 1, 2, 2, 2, 2, 2, 2],
1135 3 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 2, 3, 3, 3, 3, 3]))vec![0, 1, 2, 3, 3, 3, 3, 3],
1136 4.. => (0..int_width as i32).collect(),
1137 };
1138 let shuffle_mask =
1139 shuffle_indices.into_iter().map(|i| bx.const_i32(i)).collect::<Vec<_>>();
1140 bx.shuffle_vector(val, bx.const_null(src_ty), bx.const_vector(&shuffle_mask))
1141 };
1142 bx.bitcast(val, dest_ty)
1143 }
1144 (TypeKind::Integer, TypeKind::Vector) if bx.element_type(dest_ty) == bx.type_i1() => {
1146 let vector_length = bx.vector_length(dest_ty) as u64;
1147 let int_width = vector_length.next_power_of_two().max(8);
1148
1149 let intermediate_ty = bx.type_vector(bx.type_i1(), int_width);
1150 let intermediate = bx.bitcast(val, intermediate_ty);
1151
1152 if vector_length == int_width {
1153 intermediate
1154 } else {
1155 let shuffle_mask: Vec<_> =
1156 (0..vector_length).map(|i| bx.const_i32(i as i32)).collect();
1157 bx.shuffle_vector(
1158 intermediate,
1159 bx.const_poison(intermediate_ty),
1160 bx.const_vector(&shuffle_mask),
1161 )
1162 }
1163 }
1164 (TypeKind::Vector, TypeKind::X86_AMX) => {
1165 bx.call_intrinsic("llvm.x86.cast.vector.to.tile", &[src_ty], &[val])
1166 }
1167 (TypeKind::X86_AMX, TypeKind::Vector) => {
1168 bx.call_intrinsic("llvm.x86.cast.tile.to.vector", &[dest_ty], &[val])
1169 }
1170 _ => bx.bitcast(val, dest_ty), }
1172}
1173
1174fn intrinsic_fn<'ll, 'tcx>(
1175 bx: &Builder<'_, 'll, 'tcx>,
1176 name: &str,
1177 rust_return_ty: &'ll Type,
1178 rust_argument_tys: Vec<&'ll Type>,
1179 instance: ty::Instance<'tcx>,
1180) -> &'ll Value {
1181 let tcx = bx.tcx;
1182
1183 let rust_fn_ty = bx.type_func(&rust_argument_tys, rust_return_ty);
1184
1185 let intrinsic = llvm::Intrinsic::lookup(name.as_bytes());
1186
1187 if let Some(intrinsic) = intrinsic
1188 && intrinsic.is_target_specific()
1189 {
1190 let (llvm_arch, _) = name[5..].split_once('.').unwrap();
1191 let rust_arch = &tcx.sess.target.arch;
1192
1193 if let Some(correct_llvm_arch) = llvm_arch_for(rust_arch)
1194 && llvm_arch != correct_llvm_arch
1195 {
1196 tcx.dcx().emit_fatal(IntrinsicWrongArch {
1197 name,
1198 target_arch: rust_arch.desc(),
1199 span: tcx.def_span(instance.def_id()),
1200 });
1201 }
1202 }
1203
1204 if let Some(intrinsic) = intrinsic
1205 && !intrinsic.is_overloaded()
1206 {
1207 let llfn = intrinsic.get_declaration(bx.llmod, &[]);
1209 let llvm_fn_ty = bx.get_type_of_global(llfn);
1210
1211 let llvm_return_ty = bx.get_return_type(llvm_fn_ty);
1212 let llvm_argument_tys = bx.func_params_types(llvm_fn_ty);
1213 let llvm_is_variadic = bx.func_is_variadic(llvm_fn_ty);
1214
1215 let is_correct_signature = !llvm_is_variadic
1216 && rust_argument_tys.len() == llvm_argument_tys.len()
1217 && iter::once((rust_return_ty, llvm_return_ty))
1218 .chain(iter::zip(rust_argument_tys, llvm_argument_tys))
1219 .all(|(rust_ty, llvm_ty)| can_autocast(bx, rust_ty, llvm_ty));
1220
1221 if !is_correct_signature {
1222 tcx.dcx().emit_fatal(IntrinsicSignatureMismatch {
1223 name,
1224 llvm_fn_ty: &::alloc::__export::must_use({
::alloc::fmt::format(format_args!("{0:?}", llvm_fn_ty))
})format!("{llvm_fn_ty:?}"),
1225 rust_fn_ty: &::alloc::__export::must_use({
::alloc::fmt::format(format_args!("{0:?}", rust_fn_ty))
})format!("{rust_fn_ty:?}"),
1226 span: tcx.def_span(instance.def_id()),
1227 });
1228 }
1229
1230 return llfn;
1231 }
1232
1233 let llfn = declare_raw_fn(
1235 bx,
1236 name,
1237 llvm::CCallConv,
1238 llvm::UnnamedAddr::Global,
1239 llvm::Visibility::Default,
1240 rust_fn_ty,
1241 );
1242
1243 if intrinsic.is_none() {
1244 let mut new_llfn = None;
1245 let can_upgrade = unsafe { llvm::LLVMRustUpgradeIntrinsicFunction(llfn, &mut new_llfn) };
1246
1247 if !can_upgrade {
1248 tcx.dcx().emit_fatal(UnknownIntrinsic { name, span: tcx.def_span(instance.def_id()) });
1250 } else if let Some(def_id) = instance.def_id().as_local() {
1251 let hir_id = tcx.local_def_id_to_hir_id(def_id);
1253
1254 let msg = if let Some(new_llfn) = new_llfn {
1256 ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("using deprecated intrinsic `{1}`, `{0}` can be used instead",
str::from_utf8(&llvm::get_value_name(new_llfn)).unwrap(),
name))
})format!(
1257 "using deprecated intrinsic `{name}`, `{}` can be used instead",
1258 str::from_utf8(&llvm::get_value_name(new_llfn)).unwrap()
1259 )
1260 } else {
1261 ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("using deprecated intrinsic `{0}`",
name))
})format!("using deprecated intrinsic `{name}`")
1262 };
1263
1264 tcx.emit_node_lint(
1265 DEPRECATED_LLVM_INTRINSIC,
1266 hir_id,
1267 rustc_errors::DiagDecorator(|d| {
1268 d.primary_message(msg).span(tcx.hir_span(hir_id));
1269 }),
1270 );
1271 }
1272 }
1273
1274 llfn
1275}
1276
/// Codegens the `catch_unwind` intrinsic by dispatching to the lowering for
/// the target's exception-handling strategy. The check order matters: a
/// non-unwinding panic strategy short-circuits everything, then MSVC SEH,
/// then wasm EH, then Emscripten, with Itanium/GNU-style EH as the default.
fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if !bx.sess().panic_strategy().unwinds() {
        // Panics can't unwind: just call `try_func` directly and store 0 ("no panic").
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == Os::Emscripten {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}
1300
1301fn codegen_msvc_try<'ll, 'tcx>(
1309 bx: &mut Builder<'_, 'll, 'tcx>,
1310 try_func: &'ll Value,
1311 data: &'ll Value,
1312 catch_func: &'ll Value,
1313 dest: PlaceRef<'tcx, &'ll Value>,
1314) {
1315 let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
1316 bx.set_personality_fn(bx.eh_personality());
1317
1318 let normal = bx.append_sibling_block("normal");
1319 let catchswitch = bx.append_sibling_block("catchswitch");
1320 let catchpad_rust = bx.append_sibling_block("catchpad_rust");
1321 let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
1322 let caught = bx.append_sibling_block("caught");
1323
1324 let try_func = llvm::get_param(bx.llfn(), 0);
1325 let data = llvm::get_param(bx.llfn(), 1);
1326 let catch_func = llvm::get_param(bx.llfn(), 2);
1327
1328 let ptr_size = bx.tcx().data_layout.pointer_size();
1384 let ptr_align = bx.tcx().data_layout.pointer_align().abi;
1385 let slot = bx.alloca(ptr_size, ptr_align);
1386 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
1387 bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
1388
1389 bx.switch_to_block(normal);
1390 bx.ret(bx.const_i32(0));
1391
1392 bx.switch_to_block(catchswitch);
1393 let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
1394
1395 let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
1410 let type_name = bx.const_bytes(b"rust_panic\0");
1411 let type_info =
1412 bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
1413 let tydesc = bx.declare_global(
1414 &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
1415 bx.val_ty(type_info),
1416 );
1417
1418 llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
1419 if bx.cx.tcx.sess.target.supports_comdat() {
1420 llvm::SetUniqueComdat(bx.llmod, tydesc);
1421 }
1422 llvm::set_initializer(tydesc, type_info);
1423
1424 bx.switch_to_block(catchpad_rust);
1431 let flags = bx.const_i32(8);
1432 let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
1433 let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
1434 let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
1435 bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
1436 bx.catch_ret(&funclet, caught);
1437
1438 bx.switch_to_block(catchpad_foreign);
1440 let flags = bx.const_i32(64);
1441 let null = bx.const_null(bx.type_ptr());
1442 let funclet = bx.catch_pad(cs, &[null, flags, null]);
1443 bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
1444 bx.catch_ret(&funclet, caught);
1445
1446 bx.switch_to_block(caught);
1447 bx.ret(bx.const_i32(1));
1448 });
1449
1450 let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
1453 OperandValue::Immediate(ret).store(bx, dest);
1454}
1455
1456fn codegen_wasm_try<'ll, 'tcx>(
1458 bx: &mut Builder<'_, 'll, 'tcx>,
1459 try_func: &'ll Value,
1460 data: &'ll Value,
1461 catch_func: &'ll Value,
1462 dest: PlaceRef<'tcx, &'ll Value>,
1463) {
1464 let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
1465 bx.set_personality_fn(bx.eh_personality());
1466
1467 let normal = bx.append_sibling_block("normal");
1468 let catchswitch = bx.append_sibling_block("catchswitch");
1469 let catchpad = bx.append_sibling_block("catchpad");
1470 let caught = bx.append_sibling_block("caught");
1471
1472 let try_func = llvm::get_param(bx.llfn(), 0);
1473 let data = llvm::get_param(bx.llfn(), 1);
1474 let catch_func = llvm::get_param(bx.llfn(), 2);
1475
1476 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
1500 bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
1501
1502 bx.switch_to_block(normal);
1503 bx.ret(bx.const_i32(0));
1504
1505 bx.switch_to_block(catchswitch);
1506 let cs = bx.catch_switch(None, None, &[catchpad]);
1507
1508 bx.switch_to_block(catchpad);
1509 let null = bx.const_null(bx.type_ptr());
1510 let funclet = bx.catch_pad(cs, &[null]);
1511
1512 let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
1513 let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);
1514
1515 let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
1516 bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
1517 bx.catch_ret(&funclet, caught);
1518
1519 bx.switch_to_block(caught);
1520 bx.ret(bx.const_i32(1));
1521 });
1522
1523 let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
1526 OperandValue::Immediate(ret).store(bx, dest);
1527}
1528
1529fn codegen_gnu_try<'ll, 'tcx>(
1541 bx: &mut Builder<'_, 'll, 'tcx>,
1542 try_func: &'ll Value,
1543 data: &'ll Value,
1544 catch_func: &'ll Value,
1545 dest: PlaceRef<'tcx, &'ll Value>,
1546) {
1547 let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
1548 let then = bx.append_sibling_block("then");
1561 let catch = bx.append_sibling_block("catch");
1562
1563 let try_func = llvm::get_param(bx.llfn(), 0);
1564 let data = llvm::get_param(bx.llfn(), 1);
1565 let catch_func = llvm::get_param(bx.llfn(), 2);
1566 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
1567 bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
1568
1569 bx.switch_to_block(then);
1570 bx.ret(bx.const_i32(0));
1571
1572 bx.switch_to_block(catch);
1579 let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
1580 let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
1581 let tydesc = bx.const_null(bx.type_ptr());
1582 bx.add_clause(vals, tydesc);
1583 let ptr = bx.extract_value(vals, 0);
1584 let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
1585 bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
1586 bx.ret(bx.const_i32(1));
1587 });
1588
1589 let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
1592 OperandValue::Immediate(ret).store(bx, dest);
1593}
1594
1595fn codegen_emcc_try<'ll, 'tcx>(
1599 bx: &mut Builder<'_, 'll, 'tcx>,
1600 try_func: &'ll Value,
1601 data: &'ll Value,
1602 catch_func: &'ll Value,
1603 dest: PlaceRef<'tcx, &'ll Value>,
1604) {
1605 let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
1606 let then = bx.append_sibling_block("then");
1624 let catch = bx.append_sibling_block("catch");
1625
1626 let try_func = llvm::get_param(bx.llfn(), 0);
1627 let data = llvm::get_param(bx.llfn(), 1);
1628 let catch_func = llvm::get_param(bx.llfn(), 2);
1629 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
1630 bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
1631
1632 bx.switch_to_block(then);
1633 bx.ret(bx.const_i32(0));
1634
1635 bx.switch_to_block(catch);
1641 let tydesc = bx.eh_catch_typeinfo();
1642 let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
1643 let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
1644 bx.add_clause(vals, tydesc);
1645 bx.add_clause(vals, bx.const_null(bx.type_ptr()));
1646 let ptr = bx.extract_value(vals, 0);
1647 let selector = bx.extract_value(vals, 1);
1648
1649 let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
1651 let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
1652 let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
1653
1654 let ptr_size = bx.tcx().data_layout.pointer_size();
1657 let ptr_align = bx.tcx().data_layout.pointer_align().abi;
1658 let i8_align = bx.tcx().data_layout.i8_align;
1659 if !(i8_align <= ptr_align) {
::core::panicking::panic("assertion failed: i8_align <= ptr_align")
};assert!(i8_align <= ptr_align);
1661 let catch_data = bx.alloca(2 * ptr_size, ptr_align);
1662 bx.store(ptr, catch_data, ptr_align);
1663 let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
1664 bx.store(is_rust_panic, catch_data_1, i8_align);
1665
1666 let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
1667 bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
1668 bx.ret(bx.const_i32(1));
1669 });
1670
1671 let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
1674 OperandValue::Immediate(ret).store(bx, dest);
1675}
1676
1677fn gen_fn<'a, 'll, 'tcx>(
1680 cx: &'a CodegenCx<'ll, 'tcx>,
1681 name: &str,
1682 rust_fn_sig: ty::PolyFnSig<'tcx>,
1683 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1684) -> (&'ll Type, &'ll Value) {
1685 let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
1686 let llty = fn_abi.llvm_type(cx);
1687 let llfn = cx.declare_fn(name, fn_abi, None);
1688 cx.set_frame_pointer_type(llfn);
1689 cx.apply_target_cpu_attr(llfn);
1690 llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
1692 let llbb = Builder::append_block(cx, llfn, "entry-block");
1693 let bx = Builder::build(cx, llbb);
1694 codegen(bx);
1695 (llty, llfn)
1696}
1697
1698fn get_rust_try_fn<'a, 'll, 'tcx>(
1703 cx: &'a CodegenCx<'ll, 'tcx>,
1704 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1705) -> (&'ll Type, &'ll Value) {
1706 if let Some(llfn) = cx.rust_try_fn.get() {
1707 return llfn;
1708 }
1709
1710 let tcx = cx.tcx;
1712 let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
1713 let try_fn_ty = Ty::new_fn_ptr(
1715 tcx,
1716 ty::Binder::dummy(tcx.mk_fn_sig_rust_abi([i8p], tcx.types.unit, hir::Safety::Unsafe)),
1717 );
1718 let catch_fn_ty = Ty::new_fn_ptr(
1720 tcx,
1721 ty::Binder::dummy(tcx.mk_fn_sig_rust_abi([i8p, i8p], tcx.types.unit, hir::Safety::Unsafe)),
1722 );
1723 let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig_rust_abi(
1725 [try_fn_ty, i8p, catch_fn_ty],
1726 tcx.types.i32,
1727 hir::Safety::Unsafe,
1728 ));
1729 let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
1730 cx.rust_try_fn.set(Some(rust_try));
1731 rust_try
1732}
1733
1734fn codegen_autodiff<'ll, 'tcx>(
1735 bx: &mut Builder<'_, 'll, 'tcx>,
1736 tcx: TyCtxt<'tcx>,
1737 instance: ty::Instance<'tcx>,
1738 args: &[OperandRef<'tcx, &'ll Value>],
1739 result: PlaceRef<'tcx, &'ll Value>,
1740) {
1741 if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) {
1742 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable);
1743 }
1744
1745 let ct = tcx.crate_types();
1746 let lto = tcx.sess.lto();
1747 if ct.len() == 1 && ct.contains(&CrateType::Executable) {
1748 if lto != rustc_session::config::Lto::Fat {
1749 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
1750 }
1751 } else {
1752 if lto != rustc_session::config::Lto::Fat && !tcx.sess.opts.cg.linker_plugin_lto.enabled() {
1753 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
1754 }
1755 }
1756
1757 let fn_args = instance.args;
1758 let callee_ty = instance.ty(tcx, bx.typing_env());
1759
1760 let sig = callee_ty.fn_sig(tcx).skip_binder();
1761
1762 let ret_ty = sig.output();
1763 let llret_ty = bx.layout_of(ret_ty).llvm_type(bx);
1764
1765 let source_fn_ptr_ty = fn_args.into_type_list(tcx)[0];
1766 let fn_to_diff = args[0].immediate();
1767
1768 let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() {
1769 ty::FnDef(def_id, diff_args) => (def_id, diff_args),
1770 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("invalid args"))bug!("invalid args"),
1771 };
1772
1773 let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) {
1774 Ok(Some(instance)) => instance,
1775 Ok(None) => ::rustc_middle::util::bug::bug_fmt(format_args!("could not resolve ({0:?}, {1:?}) to a specific autodiff instance",
diff_id, diff_args))bug!(
1776 "could not resolve ({:?}, {:?}) to a specific autodiff instance",
1777 diff_id,
1778 diff_args
1779 ),
1780 Err(_) => {
1781 return;
1783 }
1784 };
1785
1786 let val_arr = get_args_from_tuple(bx, args[2], fn_diff);
1787 let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff.clone(), LOCAL_CRATE);
1788
1789 let Some(Some(mut diff_attrs)) =
1790 {
{
'done:
{
for i in
::rustc_hir::attrs::HasAttrs::get_attrs(fn_diff.def_id(),
&tcx) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcAutodiff(attr)) => {
break 'done Some(attr.clone());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}find_attr!(tcx, fn_diff.def_id(), RustcAutodiff(attr) => attr.clone())
1791 else {
1792 ::rustc_middle::util::bug::bug_fmt(format_args!("could not find autodiff attrs"))bug!("could not find autodiff attrs")
1793 };
1794
1795 adjust_activity_to_abi(
1796 tcx,
1797 source_fn_ptr_ty,
1798 TypingEnv::fully_monomorphized(),
1799 &mut diff_attrs.input_activity,
1800 );
1801
1802 let fnc_tree = rustc_middle::ty::fnc_typetrees(tcx, source_fn_ptr_ty);
1803
1804 generate_enzyme_call(
1806 bx,
1807 bx.cx,
1808 fn_to_diff,
1809 &diff_symbol,
1810 llret_ty,
1811 &val_arr,
1812 &diff_attrs,
1813 result,
1814 fnc_tree,
1815 );
1816}
1817
1818fn codegen_offload<'ll, 'tcx>(
1823 bx: &mut Builder<'_, 'll, 'tcx>,
1824 tcx: TyCtxt<'tcx>,
1825 instance: ty::Instance<'tcx>,
1826 args: &[OperandRef<'tcx, &'ll Value>],
1827) {
1828 let cx = bx.cx;
1829 let fn_args = instance.args;
1830
1831 let (target_id, target_args) = match fn_args.into_type_list(tcx)[0].kind() {
1832 ty::FnDef(def_id, params) => (def_id, params),
1833 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("invalid offload intrinsic arg"))bug!("invalid offload intrinsic arg"),
1834 };
1835
1836 let fn_target = match Instance::try_resolve(tcx, cx.typing_env(), *target_id, target_args) {
1837 Ok(Some(instance)) => instance,
1838 Ok(None) => ::rustc_middle::util::bug::bug_fmt(format_args!("could not resolve ({0:?}, {1:?}) to a specific offload instance",
target_id, target_args))bug!(
1839 "could not resolve ({:?}, {:?}) to a specific offload instance",
1840 target_id,
1841 target_args
1842 ),
1843 Err(_) => {
1844 return;
1846 }
1847 };
1848
1849 let offload_dims = OffloadKernelDims::from_operands(bx, &args[1], &args[2]);
1850 let args = get_args_from_tuple(bx, args[3], fn_target);
1851 let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
1852
1853 let sig = tcx.fn_sig(fn_target.def_id()).skip_binder();
1854 let sig = tcx.instantiate_bound_regions_with_erased(sig);
1855 let inputs = sig.inputs();
1856
1857 let fn_abi = cx.fn_abi_of_instance(fn_target, ty::List::empty());
1858
1859 let mut metadata = Vec::new();
1860 let mut types = Vec::new();
1861
1862 for (i, arg_abi) in fn_abi.args.iter().enumerate() {
1863 let ty = inputs[i];
1864 let decomposed = OffloadMetadata::handle_abi(cx, tcx, ty, arg_abi);
1865
1866 for (meta, entry_ty) in decomposed {
1867 metadata.push(meta);
1868 types.push(bx.cx.layout_of(entry_ty).llvm_type(bx.cx));
1869 }
1870 }
1871
1872 let offload_globals_ref = cx.offload_globals.borrow();
1873 let offload_globals = match offload_globals_ref.as_ref() {
1874 Some(globals) => globals,
1875 None => {
1876 return;
1878 }
1879 };
1880 register_offload(cx);
1881 let offload_data = gen_define_handling(&cx, &metadata, target_symbol, offload_globals);
1882 gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals, &offload_dims);
1883}
1884
1885fn get_args_from_tuple<'ll, 'tcx>(
1886 bx: &mut Builder<'_, 'll, 'tcx>,
1887 tuple_op: OperandRef<'tcx, &'ll Value>,
1888 fn_instance: Instance<'tcx>,
1889) -> Vec<&'ll Value> {
1890 let cx = bx.cx;
1891 let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty());
1892
1893 match tuple_op.val {
1894 OperandValue::Immediate(val) => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[val]))vec![val],
1895 OperandValue::Pair(v1, v2) => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[v1, v2]))vec![v1, v2],
1896 OperandValue::Ref(ptr) => {
1897 let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout };
1898
1899 let mut result = Vec::with_capacity(fn_abi.args.len());
1900 let mut tuple_index = 0;
1901
1902 for arg in &fn_abi.args {
1903 match arg.mode {
1904 PassMode::Ignore => {}
1905 PassMode::Direct(_) | PassMode::Cast { .. } => {
1906 let field = tuple_place.project_field(bx, tuple_index);
1907 let llvm_ty = field.layout.llvm_type(bx.cx);
1908 let val = bx.load(llvm_ty, field.val.llval, field.val.align);
1909 result.push(val);
1910 tuple_index += 1;
1911 }
1912 PassMode::Pair(_, _) => {
1913 let field = tuple_place.project_field(bx, tuple_index);
1914 let llvm_ty = field.layout.llvm_type(bx.cx);
1915 let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align);
1916 result.push(bx.extract_value(pair_val, 0));
1917 result.push(bx.extract_value(pair_val, 1));
1918 tuple_index += 1;
1919 }
1920 PassMode::Indirect { .. } => {
1921 let field = tuple_place.project_field(bx, tuple_index);
1922 result.push(field.val.llval);
1923 tuple_index += 1;
1924 }
1925 }
1926 }
1927
1928 result
1929 }
1930
1931 OperandValue::ZeroSized => ::alloc::vec::Vec::new()vec![],
1932 }
1933}
1934
1935fn generic_simd_intrinsic<'ll, 'tcx>(
1936 bx: &mut Builder<'_, 'll, 'tcx>,
1937 name: Symbol,
1938 fn_args: GenericArgsRef<'tcx>,
1939 args: &[OperandRef<'tcx, &'ll Value>],
1940 ret_ty: Ty<'tcx>,
1941 llret_ty: &'ll Type,
1942 span: Span,
1943) -> Result<&'ll Value, ()> {
1944 macro_rules! return_error {
1945 ($diag: expr) => {{
1946 bx.sess().dcx().emit_err($diag);
1947 return Err(());
1948 }};
1949 }
1950
1951 macro_rules! require {
1952 ($cond: expr, $diag: expr) => {
1953 if !$cond {
1954 return_error!($diag);
1955 }
1956 };
1957 }
1958
1959 macro_rules! require_simd {
1960 ($ty: expr, $variant:ident) => {{
1961 require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1962 $ty.simd_size_and_type(bx.tcx())
1963 }};
1964 }
1965
1966 macro_rules! require_simd_or_scalable {
1967 ($ty: expr, $variant:ident) => {{
1968 require!(
1969 $ty.is_simd() || $ty.is_scalable_vector(),
1970 InvalidMonomorphization::$variant { span, name, ty: $ty }
1971 );
1972 if $ty.is_simd() {
1973 let (len, ty) = $ty.simd_size_and_type(bx.tcx());
1974 (len, ty, None)
1975 } else {
1976 let (count, ty, num_vecs) =
1977 $ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
1978 (count as u64, ty, Some(num_vecs))
1979 }
1980 }};
1981 }
1982
1983 macro_rules! require_int_or_uint_ty {
1985 ($ty: expr, $diag: expr) => {
1986 match $ty {
1987 ty::Int(i) => {
1988 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1989 }
1990 ty::Uint(i) => {
1991 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1992 }
1993 _ => {
1994 return_error!($diag);
1995 }
1996 }
1997 };
1998 }
1999
2000 let llvm_version = crate::llvm_util::get_version();
2001
2002 fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
2016 bx: &mut Builder<'a, 'll, 'tcx>,
2017 i_xn: &'ll Value,
2018 in_elem_bitwidth: u64,
2019 in_len: u64,
2020 ) -> &'ll Value {
2021 let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
2023 let shift_indices = ::alloc::vec::from_elem(shift_idx, in_len as _)vec![shift_idx; in_len as _];
2024 let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
2025 bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
2027 }
2028
2029 if truecfg!(debug_assertions) {
2031 for arg in args {
2032 if arg.layout.ty.is_simd() {
2033 {
match arg.val {
OperandValue::Immediate(_) => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"OperandValue::Immediate(_)", ::core::option::Option::None);
}
}
};assert_matches!(arg.val, OperandValue::Immediate(_));
2034 }
2035 }
2036 }
2037
2038 if name == sym::simd_select_bitmask {
2039 let (len, _) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdArgument {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdArgument);
2040
2041 let expected_int_bits = len.max(8).next_power_of_two();
2042 let expected_bytes = len.div_ceil(8);
2043
2044 let mask_ty = args[0].layout.ty;
2045 let mask = match mask_ty.kind() {
2046 ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
2047 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
2048 ty::Array(elem, len)
2049 if #[allow(non_exhaustive_omitted_patterns)] match elem.kind() {
ty::Uint(ty::UintTy::U8) => true,
_ => false,
}matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
2050 && len
2051 .try_to_target_usize(bx.tcx)
2052 .expect("expected monomorphic const in codegen")
2053 == expected_bytes =>
2054 {
2055 let place = PlaceRef::alloca(bx, args[0].layout);
2056 args[0].val.store(bx, place);
2057 let int_ty = bx.type_ix(expected_bytes * 8);
2058 bx.load(int_ty, place.val.llval, Align::ONE)
2059 }
2060 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::InvalidBitmask {
span,
name,
mask_ty,
expected_int_bits,
expected_bytes,
});
return Err(());
}return_error!(InvalidMonomorphization::InvalidBitmask {
2061 span,
2062 name,
2063 mask_ty,
2064 expected_int_bits,
2065 expected_bytes
2066 }),
2067 };
2068
2069 let i1 = bx.type_i1();
2070 let im = bx.type_ix(len);
2071 let i1xn = bx.type_vector(i1, len);
2072 let m_im = bx.trunc(mask, im);
2073 let m_i1s = bx.bitcast(m_im, i1xn);
2074 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
2075 }
2076
2077 if name == sym::simd_splat {
2078 let (_out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2079
2080 if !(args[0].layout.ty == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: out_ty,
vector_type: ret_ty,
});
return Err(());
};
};require!(
2081 args[0].layout.ty == out_ty,
2082 InvalidMonomorphization::ExpectedVectorElementType {
2083 span,
2084 name,
2085 expected_element: out_ty,
2086 vector_type: ret_ty,
2087 }
2088 );
2089
2090 let poison_vec = bx.const_poison(llret_ty);
2092 let idx0 = bx.const_i32(0);
2093 let v0 = bx.insert_element(poison_vec, args[0].immediate(), idx0);
2094
2095 let splat = bx.shuffle_vector(v0, poison_vec, bx.const_null(llret_ty));
2098
2099 return Ok(splat);
2100 }
2101
2102 let supports_scalable = match name {
2103 sym::simd_cast | sym::simd_select => true,
2104 _ => false,
2105 };
2106
2107 if !supports_scalable {
2112 let _ = {
if !args[0].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdInput {
span,
name,
ty: args[0].layout.ty,
});
return Err(());
};
};
args[0].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[0].layout.ty, SimdInput);
2113 }
2114 let (in_len, in_elem, in_num_vecs) = {
if !(args[0].layout.ty.is_simd() ||
args[0].layout.ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdInput {
span,
name,
ty: args[0].layout.ty,
});
return Err(());
};
};
if args[0].layout.ty.is_simd() {
let (len, ty) = args[0].layout.ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
args[0].layout.ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(args[0].layout.ty, SimdInput);
2115 let in_ty = args[0].layout.ty;
2116
2117 let comparison = match name {
2118 sym::simd_eq => Some(BinOp::Eq),
2119 sym::simd_ne => Some(BinOp::Ne),
2120 sym::simd_lt => Some(BinOp::Lt),
2121 sym::simd_le => Some(BinOp::Le),
2122 sym::simd_gt => Some(BinOp::Gt),
2123 sym::simd_ge => Some(BinOp::Ge),
2124 _ => None,
2125 };
2126
2127 if let Some(cmp_op) = comparison {
2128 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2129
2130 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2131 in_len == out_len,
2132 InvalidMonomorphization::ReturnLengthInputType {
2133 span,
2134 name,
2135 in_len,
2136 in_ty,
2137 ret_ty,
2138 out_len
2139 }
2140 );
2141 if !(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnIntegerType {
span,
name,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2142 bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
2143 InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
2144 );
2145
2146 return Ok(compare_simd_types(
2147 bx,
2148 args[0].immediate(),
2149 args[1].immediate(),
2150 in_elem,
2151 llret_ty,
2152 cmp_op,
2153 ));
2154 }
2155
2156 if name == sym::simd_shuffle_const_generic {
2157 let idx = fn_args[2].expect_const().to_branch();
2158 let n = idx.len() as u64;
2159
2160 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2161 if !(out_len == n) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLength {
span,
name,
in_len: n,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2162 out_len == n,
2163 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
2164 );
2165 if !(in_elem == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnElement {
span,
name,
in_elem,
in_ty,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2166 in_elem == out_ty,
2167 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
2168 );
2169
2170 let total_len = in_len * 2;
2171
2172 let indices: Option<Vec<_>> = idx
2173 .iter()
2174 .enumerate()
2175 .map(|(arg_idx, val)| {
2176 let idx = val.to_leaf().to_i32();
2177 if idx >= i32::try_from(total_len).unwrap() {
2178 bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
2179 span,
2180 name,
2181 arg_idx: arg_idx as u64,
2182 total_len: total_len.into(),
2183 });
2184 None
2185 } else {
2186 Some(bx.const_i32(idx))
2187 }
2188 })
2189 .collect();
2190 let Some(indices) = indices else {
2191 return Ok(bx.const_null(llret_ty));
2192 };
2193
2194 return Ok(bx.shuffle_vector(
2195 args[0].immediate(),
2196 args[1].immediate(),
2197 bx.const_vector(&indices),
2198 ));
2199 }
2200
2201 if name == sym::simd_shuffle {
2202 let idx_ty = args[2].layout.ty;
2204 let n: u64 = if idx_ty.is_simd()
2205 && #[allow(non_exhaustive_omitted_patterns)] match idx_ty.simd_size_and_type(bx.cx.tcx).1.kind()
{
ty::Uint(ty::UintTy::U32) => true,
_ => false,
}matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
2206 {
2207 idx_ty.simd_size_and_type(bx.cx.tcx).0
2208 } else {
2209 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdShuffle {
span,
name,
ty: idx_ty,
});
return Err(());
}return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
2210 };
2211
2212 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2213 if !(out_len == n) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLength {
span,
name,
in_len: n,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2214 out_len == n,
2215 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
2216 );
2217 if !(in_elem == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnElement {
span,
name,
in_elem,
in_ty,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2218 in_elem == out_ty,
2219 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
2220 );
2221
2222 let total_len = u128::from(in_len) * 2;
2223
2224 let indices = args[2].immediate();
2226 for i in 0..n {
2227 let val = bx.const_get_elt(indices, i as u64);
2228 let idx = bx
2229 .const_to_opt_u128(val, true)
2230 .unwrap_or_else(|| ::rustc_middle::util::bug::bug_fmt(format_args!("typeck should have already ensured that these are const"))bug!("typeck should have already ensured that these are const"));
2231 if idx >= total_len {
2232 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: i,
total_len,
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2233 span,
2234 name,
2235 arg_idx: i,
2236 total_len,
2237 });
2238 }
2239 }
2240
2241 return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
2242 }
2243
2244 if name == sym::simd_insert || name == sym::simd_insert_dyn {
2245 if !(in_elem == args[2].layout.ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::InsertedType {
span,
name,
in_elem,
in_ty,
out_ty: args[2].layout.ty,
});
return Err(());
};
};require!(
2246 in_elem == args[2].layout.ty,
2247 InvalidMonomorphization::InsertedType {
2248 span,
2249 name,
2250 in_elem,
2251 in_ty,
2252 out_ty: args[2].layout.ty
2253 }
2254 );
2255
2256 let index_imm = if name == sym::simd_insert {
2257 let idx = bx
2258 .const_to_opt_u128(args[1].immediate(), false)
2259 .expect("typeck should have ensure that this is a const");
2260 if idx >= in_len.into() {
2261 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: 1,
total_len: in_len.into(),
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2262 span,
2263 name,
2264 arg_idx: 1,
2265 total_len: in_len.into(),
2266 });
2267 }
2268 bx.const_i32(idx as i32)
2269 } else {
2270 args[1].immediate()
2271 };
2272
2273 return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
2274 }
2275 if name == sym::simd_extract || name == sym::simd_extract_dyn {
2276 if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};require!(
2277 ret_ty == in_elem,
2278 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2279 );
2280 let index_imm = if name == sym::simd_extract {
2281 let idx = bx
2282 .const_to_opt_u128(args[1].immediate(), false)
2283 .expect("typeck should have ensure that this is a const");
2284 if idx >= in_len.into() {
2285 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: 1,
total_len: in_len.into(),
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2286 span,
2287 name,
2288 arg_idx: 1,
2289 total_len: in_len.into(),
2290 });
2291 }
2292 bx.const_i32(idx as i32)
2293 } else {
2294 args[1].immediate()
2295 };
2296
2297 return Ok(bx.extract_element(args[0].immediate(), index_imm));
2298 }
2299
2300 if name == sym::simd_select {
2301 let m_elem_ty = in_elem;
2302 let m_len = in_len;
2303 let (v_len, _, _) = {
if !(args[1].layout.ty.is_simd() ||
args[1].layout.ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdArgument {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
if args[1].layout.ty.is_simd() {
let (len, ty) = args[1].layout.ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
args[1].layout.ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(args[1].layout.ty, SimdArgument);
2304 if !(m_len == v_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MismatchedLengths {
span,
name,
m_len,
v_len,
});
return Err(());
};
};require!(
2305 m_len == v_len,
2306 InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
2307 );
2308
2309 let m_i1s = if args[1].layout.ty.is_scalable_vector() {
2310 match m_elem_ty.kind() {
2311 ty::Bool => {}
2312 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty,
});
return Err(());
}return_error!(InvalidMonomorphization::MaskWrongElementType {
2313 span,
2314 name,
2315 ty: m_elem_ty
2316 }),
2317 };
2318 let i1 = bx.type_i1();
2319 let i1xn = bx.type_scalable_vector(i1, m_len as u64);
2320 bx.trunc(args[0].immediate(), i1xn)
2321 } else {
2322 let in_elem_bitwidth = match m_elem_ty.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2323 m_elem_ty.kind(),
2324 InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
2325 );
2326 vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len)
2327 };
2328
2329 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
2330 }
2331
2332 if name == sym::simd_bitmask {
2333 let expected_int_bits = in_len.max(8).next_power_of_two();
2342 let expected_bytes = in_len.div_ceil(8);
2343
2344 let in_elem_bitwidth = match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: in_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2346 in_elem.kind(),
2347 InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
2348 );
2349
2350 let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
2351 let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
2353
2354 match ret_ty.kind() {
2355 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
2356 return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
2358 }
2359 ty::Array(elem, len)
2360 if #[allow(non_exhaustive_omitted_patterns)] match elem.kind() {
ty::Uint(ty::UintTy::U8) => true,
_ => false,
}matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
2361 && len
2362 .try_to_target_usize(bx.tcx)
2363 .expect("expected monomorphic const in codegen")
2364 == expected_bytes =>
2365 {
2366 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
2368
2369 let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
2371 bx.store(ze, ptr, Align::ONE);
2372 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
2373 return Ok(bx.load(array_ty, ptr, Align::ONE));
2374 }
2375 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::CannotReturn {
span,
name,
ret_ty,
expected_int_bits,
expected_bytes,
});
return Err(());
}return_error!(InvalidMonomorphization::CannotReturn {
2376 span,
2377 name,
2378 ret_ty,
2379 expected_int_bits,
2380 expected_bytes
2381 }),
2382 }
2383 }
2384
2385 fn simd_simple_float_intrinsic<'ll, 'tcx>(
2386 name: Symbol,
2387 in_elem: Ty<'_>,
2388 in_ty: Ty<'_>,
2389 in_len: u64,
2390 bx: &mut Builder<'_, 'll, 'tcx>,
2391 span: Span,
2392 args: &[OperandRef<'tcx, &'ll Value>],
2393 ) -> Result<&'ll Value, ()> {
2394 macro_rules! return_error {
2395 ($diag: expr) => {{
2396 bx.sess().dcx().emit_err($diag);
2397 return Err(());
2398 }};
2399 }
2400
2401 let ty::Float(f) = in_elem.kind() else {
2402 {
bx.sess().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
span,
name,
ty: in_ty,
});
return Err(());
};return_error!(InvalidMonomorphization::BasicFloatType { span, name, ty: in_ty });
2403 };
2404 let elem_ty = bx.cx.type_float_from_ty(*f);
2405
2406 let vec_ty = bx.type_vector(elem_ty, in_len);
2407
2408 let intr_name = match name {
2409 sym::simd_ceil => "llvm.ceil",
2410 sym::simd_fabs => "llvm.fabs",
2411 sym::simd_fcos => "llvm.cos",
2412 sym::simd_fexp2 => "llvm.exp2",
2413 sym::simd_fexp => "llvm.exp",
2414 sym::simd_flog10 => "llvm.log10",
2415 sym::simd_flog2 => "llvm.log2",
2416 sym::simd_flog => "llvm.log",
2417 sym::simd_floor => "llvm.floor",
2418 sym::simd_fma => "llvm.fma",
2419 sym::simd_relaxed_fma => "llvm.fmuladd",
2420 sym::simd_fsin => "llvm.sin",
2421 sym::simd_fsqrt => "llvm.sqrt",
2422 sym::simd_round => "llvm.round",
2423 sym::simd_round_ties_even => "llvm.rint",
2424 sym::simd_trunc => "llvm.trunc",
2425 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnrecognizedIntrinsic {
span,
name,
});
return Err(());
}return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
2426 };
2427 Ok(bx.call_intrinsic(
2428 intr_name,
2429 &[vec_ty],
2430 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
2431 ))
2432 }
2433
2434 if #[allow(non_exhaustive_omitted_patterns)] match name {
sym::simd_ceil | sym::simd_fabs | sym::simd_fcos | sym::simd_fexp2 |
sym::simd_fexp | sym::simd_flog10 | sym::simd_flog2 | sym::simd_flog |
sym::simd_floor | sym::simd_fma | sym::simd_fsin | sym::simd_fsqrt |
sym::simd_relaxed_fma | sym::simd_round | sym::simd_round_ties_even |
sym::simd_trunc => true,
_ => false,
}std::matches!(
2435 name,
2436 sym::simd_ceil
2437 | sym::simd_fabs
2438 | sym::simd_fcos
2439 | sym::simd_fexp2
2440 | sym::simd_fexp
2441 | sym::simd_flog10
2442 | sym::simd_flog2
2443 | sym::simd_flog
2444 | sym::simd_floor
2445 | sym::simd_fma
2446 | sym::simd_fsin
2447 | sym::simd_fsqrt
2448 | sym::simd_relaxed_fma
2449 | sym::simd_round
2450 | sym::simd_round_ties_even
2451 | sym::simd_trunc
2452 ) {
2453 return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
2454 }
2455
2456 fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
2457 let elem_ty = match *elem_ty.kind() {
2458 ty::Int(v) => cx.type_int_from_ty(v),
2459 ty::Uint(v) => cx.type_uint_from_ty(v),
2460 ty::Float(v) => cx.type_float_from_ty(v),
2461 ty::RawPtr(_, _) => cx.type_ptr(),
2462 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
2463 };
2464 cx.type_vector(elem_ty, vec_len)
2465 }
2466
2467 if name == sym::simd_gather {
2468 let (_, element_ty0) = {
if !in_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdFirst {
span,
name,
ty: in_ty,
});
return Err(());
};
};
in_ty.simd_size_and_type(bx.tcx())
}require_simd!(in_ty, SimdFirst);
2479 let (out_len, element_ty1) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdSecond {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdSecond);
2480 let (out_len2, element_ty2) = {
if !args[2].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: args[2].layout.ty,
});
return Err(());
};
};
args[2].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[2].layout.ty, SimdThird);
2482 {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
};require_simd!(ret_ty, SimdReturn);
2483
2484 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len,
});
return Err(());
};
};require!(
2486 in_len == out_len,
2487 InvalidMonomorphization::SecondArgumentLength {
2488 span,
2489 name,
2490 in_len,
2491 in_ty,
2492 arg_ty: args[1].layout.ty,
2493 out_len
2494 }
2495 );
2496 if !(in_len == out_len2) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: out_len2,
});
return Err(());
};
};require!(
2497 in_len == out_len2,
2498 InvalidMonomorphization::ThirdArgumentLength {
2499 span,
2500 name,
2501 in_len,
2502 in_ty,
2503 arg_ty: args[2].layout.ty,
2504 out_len: out_len2
2505 }
2506 );
2507
2508 if !(ret_ty == in_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedReturnType {
span,
name,
in_ty,
ret_ty,
});
return Err(());
};
};require!(
2510 ret_ty == in_ty,
2511 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
2512 );
2513
2514 if !#[allow(non_exhaustive_omitted_patterns)] match *element_ty1.kind() {
ty::RawPtr(p_ty, _) if
p_ty == in_elem && p_ty.kind() == element_ty0.kind() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Not,
});
return Err(());
};
};require!(
2515 matches!(
2516 *element_ty1.kind(),
2517 ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
2518 ),
2519 InvalidMonomorphization::ExpectedElementType {
2520 span,
2521 name,
2522 expected_element: element_ty1,
2523 second_arg: args[1].layout.ty,
2524 in_elem,
2525 in_ty,
2526 mutability: ExpectedPointerMutability::Not,
2527 }
2528 );
2529
2530 let mask_elem_bitwidth = match element_ty2.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: element_ty2,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2531 element_ty2.kind(),
2532 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2533 );
2534
2535 let alignment = bx.align_of(in_elem).bytes();
2537
2538 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2540
2541 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2543
2544 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2546
2547 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2548 let alignment = bx.const_i32(alignment as i32);
2549 &[args[1].immediate(), alignment, mask, args[0].immediate()]
2550 } else {
2551 &[args[1].immediate(), mask, args[0].immediate()]
2552 };
2553
2554 let call =
2555 bx.call_intrinsic("llvm.masked.gather", &[llvm_elem_vec_ty, llvm_pointer_vec_ty], args);
2556 if llvm_version >= (22, 0, 0) {
2557 crate::attributes::apply_to_callsite(
2558 call,
2559 crate::llvm::AttributePlace::Argument(0),
2560 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2561 )
2562 }
2563 return Ok(call);
2564 }
2565
2566 fn llvm_alignment<'ll, 'tcx>(
2567 bx: &mut Builder<'_, 'll, 'tcx>,
2568 alignment: SimdAlign,
2569 vector_ty: Ty<'tcx>,
2570 element_ty: Ty<'tcx>,
2571 ) -> u64 {
2572 match alignment {
2573 SimdAlign::Unaligned => 1,
2574 SimdAlign::Element => bx.align_of(element_ty).bytes(),
2575 SimdAlign::Vector => bx.align_of(vector_ty).bytes(),
2576 }
2577 }
2578
2579 if name == sym::simd_masked_load {
2580 let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
2589
2590 let mask_ty = in_ty;
2592 let (mask_len, mask_elem) = (in_len, in_elem);
2593
2594 let pointer_ty = args[1].layout.ty;
2596
2597 let values_ty = args[2].layout.ty;
2599 let (values_len, values_elem) = {
if !values_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: values_ty,
});
return Err(());
};
};
values_ty.simd_size_and_type(bx.tcx())
}require_simd!(values_ty, SimdThird);
2600
2601 {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
};require_simd!(ret_ty, SimdReturn);
2602
2603 if !(values_len == mask_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len,
});
return Err(());
};
};require!(
2605 values_len == mask_len,
2606 InvalidMonomorphization::ThirdArgumentLength {
2607 span,
2608 name,
2609 in_len: mask_len,
2610 in_ty: mask_ty,
2611 arg_ty: values_ty,
2612 out_len: values_len
2613 }
2614 );
2615
2616 if !(ret_ty == values_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedReturnType {
span,
name,
in_ty: values_ty,
ret_ty,
});
return Err(());
};
};require!(
2618 ret_ty == values_ty,
2619 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
2620 );
2621
2622 if !#[allow(non_exhaustive_omitted_patterns)] match *pointer_ty.kind() {
ty::RawPtr(p_ty, _) if
p_ty == values_elem && p_ty.kind() == values_elem.kind() =>
true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Not,
});
return Err(());
};
};require!(
2623 matches!(
2624 *pointer_ty.kind(),
2625 ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
2626 ),
2627 InvalidMonomorphization::ExpectedElementType {
2628 span,
2629 name,
2630 expected_element: values_elem,
2631 second_arg: pointer_ty,
2632 in_elem: values_elem,
2633 in_ty: values_ty,
2634 mutability: ExpectedPointerMutability::Not,
2635 }
2636 );
2637
2638 let m_elem_bitwidth = match mask_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: mask_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2639 mask_elem.kind(),
2640 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2641 );
2642
2643 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2644
2645 let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2647
2648 let llvm_pointer = bx.type_ptr();
2649
2650 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2652
2653 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2654 let alignment = bx.const_i32(alignment as i32);
2655
2656 &[args[1].immediate(), alignment, mask, args[2].immediate()]
2657 } else {
2658 &[args[1].immediate(), mask, args[2].immediate()]
2659 };
2660
2661 let call = bx.call_intrinsic("llvm.masked.load", &[llvm_elem_vec_ty, llvm_pointer], args);
2662 if llvm_version >= (22, 0, 0) {
2663 crate::attributes::apply_to_callsite(
2664 call,
2665 crate::llvm::AttributePlace::Argument(0),
2666 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2667 )
2668 }
2669 return Ok(call);
2670 }
2671
2672 if name == sym::simd_masked_store {
2673 let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
2682
2683 let mask_ty = in_ty;
2685 let (mask_len, mask_elem) = (in_len, in_elem);
2686
2687 let pointer_ty = args[1].layout.ty;
2689
2690 let values_ty = args[2].layout.ty;
2692 let (values_len, values_elem) = {
if !values_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: values_ty,
});
return Err(());
};
};
values_ty.simd_size_and_type(bx.tcx())
}require_simd!(values_ty, SimdThird);
2693
2694 if !(values_len == mask_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len,
});
return Err(());
};
};require!(
2696 values_len == mask_len,
2697 InvalidMonomorphization::ThirdArgumentLength {
2698 span,
2699 name,
2700 in_len: mask_len,
2701 in_ty: mask_ty,
2702 arg_ty: values_ty,
2703 out_len: values_len
2704 }
2705 );
2706
2707 if !#[allow(non_exhaustive_omitted_patterns)] match *pointer_ty.kind() {
ty::RawPtr(p_ty, p_mutbl) if
p_ty == values_elem && p_ty.kind() == values_elem.kind() &&
p_mutbl.is_mut() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Mut,
});
return Err(());
};
};require!(
2709 matches!(
2710 *pointer_ty.kind(),
2711 ty::RawPtr(p_ty, p_mutbl)
2712 if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
2713 ),
2714 InvalidMonomorphization::ExpectedElementType {
2715 span,
2716 name,
2717 expected_element: values_elem,
2718 second_arg: pointer_ty,
2719 in_elem: values_elem,
2720 in_ty: values_ty,
2721 mutability: ExpectedPointerMutability::Mut,
2722 }
2723 );
2724
2725 let m_elem_bitwidth = match mask_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: mask_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2726 mask_elem.kind(),
2727 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2728 );
2729
2730 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2731
2732 let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2734
2735 let llvm_pointer = bx.type_ptr();
2736
2737 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2739
2740 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2741 let alignment = bx.const_i32(alignment as i32);
2742 &[args[2].immediate(), args[1].immediate(), alignment, mask]
2743 } else {
2744 &[args[2].immediate(), args[1].immediate(), mask]
2745 };
2746
2747 let call = bx.call_intrinsic("llvm.masked.store", &[llvm_elem_vec_ty, llvm_pointer], args);
2748 if llvm_version >= (22, 0, 0) {
2749 crate::attributes::apply_to_callsite(
2750 call,
2751 crate::llvm::AttributePlace::Argument(1),
2752 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2753 )
2754 }
2755 return Ok(call);
2756 }
2757
2758 if name == sym::simd_scatter {
2759 let (_, element_ty0) = {
if !in_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdFirst {
span,
name,
ty: in_ty,
});
return Err(());
};
};
in_ty.simd_size_and_type(bx.tcx())
}require_simd!(in_ty, SimdFirst);
2769 let (element_len1, element_ty1) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdSecond {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdSecond);
2770 let (element_len2, element_ty2) = {
if !args[2].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: args[2].layout.ty,
});
return Err(());
};
};
args[2].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[2].layout.ty, SimdThird);
2771
2772 if !(in_len == element_len1) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len: element_len1,
});
return Err(());
};
};require!(
2774 in_len == element_len1,
2775 InvalidMonomorphization::SecondArgumentLength {
2776 span,
2777 name,
2778 in_len,
2779 in_ty,
2780 arg_ty: args[1].layout.ty,
2781 out_len: element_len1
2782 }
2783 );
2784 if !(in_len == element_len2) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: element_len2,
});
return Err(());
};
};require!(
2785 in_len == element_len2,
2786 InvalidMonomorphization::ThirdArgumentLength {
2787 span,
2788 name,
2789 in_len,
2790 in_ty,
2791 arg_ty: args[2].layout.ty,
2792 out_len: element_len2
2793 }
2794 );
2795
2796 if !#[allow(non_exhaustive_omitted_patterns)] match *element_ty1.kind() {
ty::RawPtr(p_ty, p_mutbl) if
p_ty == in_elem && p_mutbl.is_mut() &&
p_ty.kind() == element_ty0.kind() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Mut,
});
return Err(());
};
};require!(
2797 matches!(
2798 *element_ty1.kind(),
2799 ty::RawPtr(p_ty, p_mutbl)
2800 if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2801 ),
2802 InvalidMonomorphization::ExpectedElementType {
2803 span,
2804 name,
2805 expected_element: element_ty1,
2806 second_arg: args[1].layout.ty,
2807 in_elem,
2808 in_ty,
2809 mutability: ExpectedPointerMutability::Mut,
2810 }
2811 );
2812
2813 let mask_elem_bitwidth = match element_ty2.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: element_ty2,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2815 element_ty2.kind(),
2816 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2817 );
2818
2819 let alignment = bx.align_of(in_elem).bytes();
2821
2822 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2824
2825 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2827
2828 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2830 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2831 let alignment = bx.const_i32(alignment as i32);
2832 &[args[0].immediate(), args[1].immediate(), alignment, mask]
2833 } else {
2834 &[args[0].immediate(), args[1].immediate(), mask]
2835 };
2836 let call = bx.call_intrinsic(
2837 "llvm.masked.scatter",
2838 &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2839 args,
2840 );
2841 if llvm_version >= (22, 0, 0) {
2842 crate::attributes::apply_to_callsite(
2843 call,
2844 crate::llvm::AttributePlace::Argument(1),
2845 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2846 )
2847 }
2848 return Ok(call);
2849 }
2850
    // Generates the handler for one arithmetic reduction intrinsic.
    // * $integer_reduce / $float_reduce: Builder methods emitting the LLVM
    //   `vector.reduce.*` call for integer resp. float elements.
    // * $ordered: whether the intrinsic carries an accumulator in args[1] that
    //   must be folded into the result ($op for ints, start value for floats).
    // * $identity: the identity element used as the float start value in the
    //   unordered (reassociable) form.
    macro_rules! arith_red {
        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
         $identity:expr) => {
            if name == sym::$name {
                // The scalar result type must equal the vector element type.
                require!(
                    ret_ty == in_elem,
                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                );
                return match in_elem.kind() {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // Fold the accumulator into the reduction result.
                            Ok(bx.$op(args[1].immediate(), r))
                        } else {
                            // NOTE(review): re-emits the reduction instead of reusing
                            // `r`, leaving the first reduce dead — confirm intended.
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    }
                    ty::Float(f) => {
                        // Float reductions always take a start value: ordered forms
                        // use the caller-supplied accumulator, unordered forms use
                        // the operation's identity element.
                        let acc = if $ordered {
                            args[1].immediate()
                        } else {
                            // Only f32/f64 identities are representable here.
                            match f.bit_width() {
                                32 => bx.const_real(bx.type_f32(), $identity),
                                64 => bx.const_real(bx.type_f64(), $identity),
                                v => return_error!(
                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
                                        span,
                                        name,
                                        symbol: sym::$name,
                                        in_ty,
                                        in_elem,
                                        size: v,
                                        ret_ty
                                    }
                                ),
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                        span,
                        name,
                        symbol: sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    }),
                };
            }
        };
    }
2906
2907 if name == sym::simd_reduce_add_ordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_add(args[0].immediate());
if true {
Ok(bx.add(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_add(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if true {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), -0.0),
64 => bx.const_real(bx.type_f64(), -0.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_add_ordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fadd(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_add_ordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2908 if name == sym::simd_reduce_mul_ordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_mul(args[0].immediate());
if true {
Ok(bx.mul(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_mul(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if true {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), 1.0),
64 => bx.const_real(bx.type_f64(), 1.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_mul_ordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fmul(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_mul_ordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2909 if name == sym::simd_reduce_add_unordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_add(args[0].immediate());
if false {
Ok(bx.add(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_add(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if false {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), -0.0),
64 => bx.const_real(bx.type_f64(), -0.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_add_unordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fadd_reassoc(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_add_unordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(
2910 simd_reduce_add_unordered: vector_reduce_add,
2911 vector_reduce_fadd_reassoc,
2912 false,
2913 add,
2914 -0.0
2915 );
2916 if name == sym::simd_reduce_mul_unordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_mul(args[0].immediate());
if false {
Ok(bx.mul(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_mul(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if false {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), 1.0),
64 => bx.const_real(bx.type_f64(), 1.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_mul_unordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fmul_reassoc(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_mul_unordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(
2917 simd_reduce_mul_unordered: vector_reduce_mul,
2918 vector_reduce_fmul_reassoc,
2919 false,
2920 mul,
2921 1.0
2922 );
2923
    // Generates the handler for a min/max reduction intrinsic: dispatches to
    // the signed/unsigned integer reduction or the float reduction depending
    // on the element type.
    macro_rules! minmax_red {
        ($name:ident: $int_red:ident, $float_red:ident) => {
            if name == sym::$name {
                // The scalar result type must equal the vector element type.
                require!(
                    ret_ty == in_elem,
                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                );
                return match in_elem.kind() {
                    // The bool flag selects signed (true) vs. unsigned (false) comparison.
                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                        span,
                        name,
                        symbol: sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    }),
                };
            }
        };
    }
2947
2948 if name == sym::simd_reduce_min {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_i) =>
Ok(bx.vector_reduce_min(args[0].immediate(), true)),
ty::Uint(_u) =>
Ok(bx.vector_reduce_min(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.vector_reduce_fmin(args[0].immediate())),
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_min,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2949 if name == sym::simd_reduce_max {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_i) =>
Ok(bx.vector_reduce_max(args[0].immediate(), true)),
ty::Uint(_u) =>
Ok(bx.vector_reduce_max(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.vector_reduce_fmax(args[0].immediate())),
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_max,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2950
    // Generates the handler for a bitwise reduction intrinsic.
    // $boolean is true for the mask reductions (simd_reduce_all/any): the input
    // is first normalized to an i1 bitmask and the i1 result is zero-extended
    // back to the boolean return type.
    macro_rules! bitwise_red {
        ($name:ident : $red:ident, $boolean:expr) => {
            if name == sym::$name {
                let input = if !$boolean {
                    // Plain bitwise reduction: result type equals the element type.
                    require!(
                        ret_ty == in_elem,
                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
                    );
                    args[0].immediate()
                } else {
                    // Mask reduction: elements must be (u)ints; determine their
                    // bit width (pointer-sized for isize/usize).
                    let bitwidth = match in_elem.kind() {
                        ty::Int(i) => {
                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                        }
                        ty::Uint(i) => {
                            i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
                        }
                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                            span,
                            name,
                            symbol: sym::$name,
                            in_ty,
                            in_elem,
                            ret_ty
                        }),
                    };

                    vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
                };
                return match in_elem.kind() {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        // Mask reductions widen the i1 result back to bool.
                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                    }
                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                        span,
                        name,
                        symbol: sym::$name,
                        in_ty,
                        in_elem,
                        ret_ty
                    }),
                };
            }
        };
    }
2997
2998 if name == sym::simd_reduce_and {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_and,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_and(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_and,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2999 if name == sym::simd_reduce_or {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_or,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_or(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_or,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_or: vector_reduce_or, false);
3000 if name == sym::simd_reduce_xor {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_xor,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_xor(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_xor,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
3001 if name == sym::simd_reduce_all {
let input =
if !true {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_all,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_and(input);
Ok(if !true { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_all,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_all: vector_reduce_and, true);
3002 if name == sym::simd_reduce_any {
let input =
if !true {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_any,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_or(input);
Ok(if !true { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_any,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_any: vector_reduce_or, true);
3003
3004 if name == sym::simd_cast_ptr {
3005 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
3006 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3007 in_len == out_len,
3008 InvalidMonomorphization::ReturnLengthInputType {
3009 span,
3010 name,
3011 in_len,
3012 in_ty,
3013 ret_ty,
3014 out_len
3015 }
3016 );
3017
3018 match in_elem.kind() {
3019 ty::RawPtr(p_ty, _) => {
3020 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
3021 bx.tcx.normalize_erasing_regions(bx.typing_env(), Unnormalized::new_wip(ty))
3022 });
3023 if !metadata.is_unit() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::CastWidePointer {
span,
name,
ty: in_elem,
});
return Err(());
};
};require!(
3024 metadata.is_unit(),
3025 InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
3026 );
3027 }
3028 _ => {
3029 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
3030 }
3031 }
3032 match out_elem.kind() {
3033 ty::RawPtr(p_ty, _) => {
3034 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
3035 bx.tcx.normalize_erasing_regions(bx.typing_env(), Unnormalized::new_wip(ty))
3036 });
3037 if !metadata.is_unit() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::CastWidePointer {
span,
name,
ty: out_elem,
});
return Err(());
};
};require!(
3038 metadata.is_unit(),
3039 InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
3040 );
3041 }
3042 _ => {
3043 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
3044 }
3045 }
3046
3047 return Ok(args[0].immediate());
3048 }
3049
3050 if name == sym::simd_expose_provenance {
3051 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
3052 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3053 in_len == out_len,
3054 InvalidMonomorphization::ReturnLengthInputType {
3055 span,
3056 name,
3057 in_len,
3058 in_ty,
3059 ret_ty,
3060 out_len
3061 }
3062 );
3063
3064 match in_elem.kind() {
3065 ty::RawPtr(_, _) => {}
3066 _ => {
3067 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
3068 }
3069 }
3070 match out_elem.kind() {
3071 ty::Uint(ty::UintTy::Usize) => {}
3072 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedUsize {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
3073 }
3074
3075 return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
3076 }
3077
3078 if name == sym::simd_with_exposed_provenance {
3079 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
3080 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3081 in_len == out_len,
3082 InvalidMonomorphization::ReturnLengthInputType {
3083 span,
3084 name,
3085 in_len,
3086 in_ty,
3087 ret_ty,
3088 out_len
3089 }
3090 );
3091
3092 match in_elem.kind() {
3093 ty::Uint(ty::UintTy::Usize) => {}
3094 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedUsize {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
3095 }
3096 match out_elem.kind() {
3097 ty::RawPtr(_, _) => {}
3098 _ => {
3099 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
3100 }
3101 }
3102
3103 return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
3104 }
3105
3106 if name == sym::simd_cast || name == sym::simd_as {
3107 let (out_len, out_elem, out_num_vecs) = {
if !(ret_ty.is_simd() || ret_ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
if ret_ty.is_simd() {
let (len, ty) = ret_ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
ret_ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(ret_ty, SimdReturn);
3108 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3109 in_len == out_len,
3110 InvalidMonomorphization::ReturnLengthInputType {
3111 span,
3112 name,
3113 in_len,
3114 in_ty,
3115 ret_ty,
3116 out_len
3117 }
3118 );
3119 if !(in_num_vecs == out_num_vecs) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnNumVecsInputType {
span,
name,
in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
in_ty,
ret_ty,
out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1)),
});
return Err(());
};
};require!(
3120 in_num_vecs == out_num_vecs,
3121 InvalidMonomorphization::ReturnNumVecsInputType {
3122 span,
3123 name,
3124 in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
3125 in_ty,
3126 ret_ty,
3127 out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1))
3128 }
3129 );
3130
3131 if in_elem == out_elem {
3133 return Ok(args[0].immediate());
3134 }
3135
3136 #[derive(#[automatically_derived]
impl ::core::marker::Copy for Sign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Sign {
#[inline]
fn clone(&self) -> Sign { *self }
}Clone)]
3137 enum Sign {
3138 Unsigned,
3139 Signed,
3140 }
3141 use Sign::*;
3142
3143 enum Style {
3144 Float,
3145 Int(Sign),
3146 Unsupported,
3147 }
3148
3149 let (in_style, in_width) = match in_elem.kind() {
3150 ty::Int(i) => (
3153 Style::Int(Signed),
3154 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3155 ),
3156 ty::Uint(u) => (
3157 Style::Int(Unsigned),
3158 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3159 ),
3160 ty::Float(f) => (Style::Float, f.bit_width()),
3161 _ => (Style::Unsupported, 0),
3162 };
3163 let (out_style, out_width) = match out_elem.kind() {
3164 ty::Int(i) => (
3165 Style::Int(Signed),
3166 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3167 ),
3168 ty::Uint(u) => (
3169 Style::Int(Unsigned),
3170 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3171 ),
3172 ty::Float(f) => (Style::Float, f.bit_width()),
3173 _ => (Style::Unsupported, 0),
3174 };
3175
3176 match (in_style, out_style) {
3177 (Style::Int(sign), Style::Int(_)) => {
3178 return Ok(match in_width.cmp(&out_width) {
3179 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
3180 Ordering::Equal => args[0].immediate(),
3181 Ordering::Less => match sign {
3182 Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
3183 Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
3184 },
3185 });
3186 }
3187 (Style::Int(Sign::Signed), Style::Float) => {
3188 return Ok(bx.sitofp(args[0].immediate(), llret_ty));
3189 }
3190 (Style::Int(Sign::Unsigned), Style::Float) => {
3191 return Ok(bx.uitofp(args[0].immediate(), llret_ty));
3192 }
3193 (Style::Float, Style::Int(sign)) => {
3194 return Ok(match (sign, name == sym::simd_as) {
3195 (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
3196 (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
3197 (_, true) => bx.cast_float_to_int(
3198 #[allow(non_exhaustive_omitted_patterns)] match sign {
Sign::Signed => true,
_ => false,
}matches!(sign, Sign::Signed),
3199 args[0].immediate(),
3200 llret_ty,
3201 ),
3202 });
3203 }
3204 (Style::Float, Style::Float) => {
3205 return Ok(match in_width.cmp(&out_width) {
3206 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
3207 Ordering::Equal => args[0].immediate(),
3208 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
3209 });
3210 }
3211 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedCast {
span,
name,
in_ty,
in_elem,
ret_ty,
out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::UnsupportedCast {
3212 span,
3213 name,
3214 in_ty,
3215 in_elem,
3216 ret_ty,
3217 out_elem
3218 }),
3219 }
3220 }
    // Expands to one `if name == sym::$name { ... }` test per listed
    // intrinsic: matches `in_elem`'s kind against the given `ty::` patterns
    // and emits the corresponding two-operand builder call on the two
    // immediates, or reports `UnsupportedOperation` for any other element
    // type.
    macro_rules! arith_binary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                return_error!(
                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                );
            })*
        }
    }
3236 if name == sym::simd_minimum_number_nsz {
match in_elem.kind() {
ty::Float(_) => {
return Ok(bx.minimum_number_nsz(args[0].immediate(),
args[1].immediate()))
}
_ => {}
}
{
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
};
}arith_binary! {
3237 simd_add: Uint, Int => add, Float => fadd;
3238 simd_sub: Uint, Int => sub, Float => fsub;
3239 simd_mul: Uint, Int => mul, Float => fmul;
3240 simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
3241 simd_rem: Uint => urem, Int => srem, Float => frem;
3242 simd_shl: Uint, Int => shl;
3243 simd_shr: Uint => lshr, Int => ashr;
3244 simd_and: Uint, Int => and;
3245 simd_or: Uint, Int => or;
3246 simd_xor: Uint, Int => xor;
3247 simd_maximum_number_nsz: Float => maximum_number_nsz;
3248 simd_minimum_number_nsz: Float => minimum_number_nsz;
3249
3250 }
    // Unary counterpart of `arith_binary!`: expands to one
    // `if name == sym::$name { ... }` test per listed intrinsic, emitting a
    // single-operand builder call for matching element kinds or reporting
    // `UnsupportedOperation` otherwise.
    macro_rules! arith_unary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
                match in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate()))
                    })*
                    _ => {},
                }
                return_error!(
                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                );
            })*
        }
    }
3266 if name == sym::simd_neg {
match in_elem.kind() {
ty::Int(_) => { return Ok(bx.neg(args[0].immediate())) }
ty::Float(_) => { return Ok(bx.fneg(args[0].immediate())) }
_ => {}
}
{
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
};
}arith_unary! {
3267 simd_neg: Int => neg, Float => fneg;
3268 }
3269
3270 if #[allow(non_exhaustive_omitted_patterns)] match name {
sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_ctpop
| sym::simd_cttz | sym::simd_carryless_mul | sym::simd_funnel_shl |
sym::simd_funnel_shr => true,
_ => false,
}matches!(
3272 name,
3273 sym::simd_bswap
3274 | sym::simd_bitreverse
3275 | sym::simd_ctlz
3276 | sym::simd_ctpop
3277 | sym::simd_cttz
3278 | sym::simd_carryless_mul
3279 | sym::simd_funnel_shl
3280 | sym::simd_funnel_shr
3281 ) {
3282 let vec_ty = bx.cx.type_vector(
3283 match *in_elem.kind() {
3284 ty::Int(i) => bx.cx.type_int_from_ty(i),
3285 ty::Uint(i) => bx.cx.type_uint_from_ty(i),
3286 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::UnsupportedOperation {
3287 span,
3288 name,
3289 in_ty,
3290 in_elem
3291 }),
3292 },
3293 in_len as u64,
3294 );
3295 let llvm_intrinsic = match name {
3296 sym::simd_bswap => "llvm.bswap",
3297 sym::simd_bitreverse => "llvm.bitreverse",
3298 sym::simd_ctlz => "llvm.ctlz",
3299 sym::simd_ctpop => "llvm.ctpop",
3300 sym::simd_cttz => "llvm.cttz",
3301 sym::simd_funnel_shl => "llvm.fshl",
3302 sym::simd_funnel_shr => "llvm.fshr",
3303 sym::simd_carryless_mul => "llvm.clmul",
3304 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
3305 };
3306 let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
3307
3308 return match name {
3309 sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
3311 sym::simd_ctlz | sym::simd_cttz => {
3312 let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
3314 Ok(bx.call_intrinsic(
3315 llvm_intrinsic,
3316 &[vec_ty],
3317 &[args[0].immediate(), dont_poison_on_zero],
3318 ))
3319 }
3320 sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
3321 Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
3323 }
3324 sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
3325 llvm_intrinsic,
3326 &[vec_ty],
3327 &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
3328 )),
3329 sym::simd_carryless_mul => {
3330 if crate::llvm_util::get_version() >= (22, 0, 0) {
3331 Ok(bx.call_intrinsic(
3332 llvm_intrinsic,
3333 &[vec_ty],
3334 &[args[0].immediate(), args[1].immediate()],
3335 ))
3336 } else {
3337 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("`simd_carryless_mul` needs LLVM 22 or higher"));span_bug!(span, "`simd_carryless_mul` needs LLVM 22 or higher");
3338 }
3339 }
3340 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
3341 };
3342 }
3343
3344 if name == sym::simd_arith_offset {
3345 let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
3347 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("must be called with a vector of pointer types as first argument"))span_bug!(span, "must be called with a vector of pointer types as first argument")
3348 });
3349 let layout = bx.layout_of(pointee);
3350 let ptrs = args[0].immediate();
3351 let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
3354 if !#[allow(non_exhaustive_omitted_patterns)] match offsets_elem.kind() {
ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize) => true,
_ => false,
}matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
3355 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("must be called with a vector of pointer-sized integers as second argument"));span_bug!(
3356 span,
3357 "must be called with a vector of pointer-sized integers as second argument"
3358 );
3359 }
3360 let offsets = args[1].immediate();
3361
3362 return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
3363 }
3364
3365 if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
3366 let lhs = args[0].immediate();
3367 let rhs = args[1].immediate();
3368 let is_add = name == sym::simd_saturating_add;
3369 let (signed, elem_ty) = match *in_elem.kind() {
3370 ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
3371 ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
3372 _ => {
3373 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
vector_type: args[0].layout.ty,
});
return Err(());
};return_error!(InvalidMonomorphization::ExpectedVectorElementType {
3374 span,
3375 name,
3376 expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
3377 vector_type: args[0].layout.ty
3378 });
3379 }
3380 };
3381 let llvm_intrinsic = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.{0}{1}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" }))
})format!(
3382 "llvm.{}{}.sat",
3383 if signed { 's' } else { 'u' },
3384 if is_add { "add" } else { "sub" },
3385 );
3386 let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
3387
3388 return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
3389 }
3390
3391 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("unknown SIMD intrinsic"));span_bug!(span, "unknown SIMD intrinsic");
3392}