use std::assert_matches::assert_matches;
use std::cmp::Ordering;
use std::ffi::c_uint;
use std::{assert_matches, iter, ptr};

use rustc_abi::{
    Align, BackendRepr, ExternAbi, Float, HasDataLayout, NumScalableVectors, Primitive, Size,
    WrappingRange,
};
use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_hir::find_attr;
use rustc_middle::mir::BinOp;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf};
use rustc_middle::ty::offload_meta::OffloadMetadata;
use rustc_middle::ty::{self, GenericArgsRef, Instance, SimdAlign, Ty, TyCtxt, TypingEnv};
use rustc_middle::{bug, span_bug};
use rustc_session::config::CrateType;
use rustc_session::lint::builtin::DEPRECATED_LLVM_INTRINSIC;
use rustc_span::{Span, Symbol, sym};
use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
use rustc_target::callconv::PassMode;
use rustc_target::spec::{Arch, Os};
use tracing::debug;

use crate::abi::FnAbiLlvmExt;
use crate::builder::Builder;
use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call};
use crate::builder::gpu_offload::{
    OffloadKernelDims, gen_call_handling, gen_define_handling, register_offload,
};
use crate::context::CodegenCx;
use crate::declare::declare_raw_fn;
use crate::errors::{
    AutoDiffWithoutEnable, AutoDiffWithoutLto, IntrinsicSignatureMismatch, IntrinsicWrongArch,
    OffloadWithoutEnable, OffloadWithoutFatLTO, UnknownIntrinsic,
};
use crate::llvm::{self, Type, Value};
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
46
47fn call_simple_intrinsic<'ll, 'tcx>(
48 bx: &mut Builder<'_, 'll, 'tcx>,
49 name: Symbol,
50 args: &[OperandRef<'tcx, &'ll Value>],
51) -> Option<&'ll Value> {
52 let (base_name, type_params): (&'static str, &[&'ll Type]) = match name {
53 sym::sqrtf16 => ("llvm.sqrt", &[bx.type_f16()]),
54 sym::sqrtf32 => ("llvm.sqrt", &[bx.type_f32()]),
55 sym::sqrtf64 => ("llvm.sqrt", &[bx.type_f64()]),
56 sym::sqrtf128 => ("llvm.sqrt", &[bx.type_f128()]),
57
58 sym::powif16 => ("llvm.powi", &[bx.type_f16(), bx.type_i32()]),
59 sym::powif32 => ("llvm.powi", &[bx.type_f32(), bx.type_i32()]),
60 sym::powif64 => ("llvm.powi", &[bx.type_f64(), bx.type_i32()]),
61 sym::powif128 => ("llvm.powi", &[bx.type_f128(), bx.type_i32()]),
62
63 sym::sinf16 => ("llvm.sin", &[bx.type_f16()]),
64 sym::sinf32 => ("llvm.sin", &[bx.type_f32()]),
65 sym::sinf64 => ("llvm.sin", &[bx.type_f64()]),
66 sym::sinf128 => ("llvm.sin", &[bx.type_f128()]),
67
68 sym::cosf16 => ("llvm.cos", &[bx.type_f16()]),
69 sym::cosf32 => ("llvm.cos", &[bx.type_f32()]),
70 sym::cosf64 => ("llvm.cos", &[bx.type_f64()]),
71 sym::cosf128 => ("llvm.cos", &[bx.type_f128()]),
72
73 sym::powf16 => ("llvm.pow", &[bx.type_f16()]),
74 sym::powf32 => ("llvm.pow", &[bx.type_f32()]),
75 sym::powf64 => ("llvm.pow", &[bx.type_f64()]),
76 sym::powf128 => ("llvm.pow", &[bx.type_f128()]),
77
78 sym::expf16 => ("llvm.exp", &[bx.type_f16()]),
79 sym::expf32 => ("llvm.exp", &[bx.type_f32()]),
80 sym::expf64 => ("llvm.exp", &[bx.type_f64()]),
81 sym::expf128 => ("llvm.exp", &[bx.type_f128()]),
82
83 sym::exp2f16 => ("llvm.exp2", &[bx.type_f16()]),
84 sym::exp2f32 => ("llvm.exp2", &[bx.type_f32()]),
85 sym::exp2f64 => ("llvm.exp2", &[bx.type_f64()]),
86 sym::exp2f128 => ("llvm.exp2", &[bx.type_f128()]),
87
88 sym::logf16 => ("llvm.log", &[bx.type_f16()]),
89 sym::logf32 => ("llvm.log", &[bx.type_f32()]),
90 sym::logf64 => ("llvm.log", &[bx.type_f64()]),
91 sym::logf128 => ("llvm.log", &[bx.type_f128()]),
92
93 sym::log10f16 => ("llvm.log10", &[bx.type_f16()]),
94 sym::log10f32 => ("llvm.log10", &[bx.type_f32()]),
95 sym::log10f64 => ("llvm.log10", &[bx.type_f64()]),
96 sym::log10f128 => ("llvm.log10", &[bx.type_f128()]),
97
98 sym::log2f16 => ("llvm.log2", &[bx.type_f16()]),
99 sym::log2f32 => ("llvm.log2", &[bx.type_f32()]),
100 sym::log2f64 => ("llvm.log2", &[bx.type_f64()]),
101 sym::log2f128 => ("llvm.log2", &[bx.type_f128()]),
102
103 sym::fmaf16 => ("llvm.fma", &[bx.type_f16()]),
104 sym::fmaf32 => ("llvm.fma", &[bx.type_f32()]),
105 sym::fmaf64 => ("llvm.fma", &[bx.type_f64()]),
106 sym::fmaf128 => ("llvm.fma", &[bx.type_f128()]),
107
108 sym::fmuladdf16 => ("llvm.fmuladd", &[bx.type_f16()]),
109 sym::fmuladdf32 => ("llvm.fmuladd", &[bx.type_f32()]),
110 sym::fmuladdf64 => ("llvm.fmuladd", &[bx.type_f64()]),
111 sym::fmuladdf128 => ("llvm.fmuladd", &[bx.type_f128()]),
112
113 sym::copysignf16 => ("llvm.copysign", &[bx.type_f16()]),
128 sym::copysignf32 => ("llvm.copysign", &[bx.type_f32()]),
129 sym::copysignf64 => ("llvm.copysign", &[bx.type_f64()]),
130 sym::copysignf128 => ("llvm.copysign", &[bx.type_f128()]),
131
132 sym::floorf16 => ("llvm.floor", &[bx.type_f16()]),
133 sym::floorf32 => ("llvm.floor", &[bx.type_f32()]),
134 sym::floorf64 => ("llvm.floor", &[bx.type_f64()]),
135 sym::floorf128 => ("llvm.floor", &[bx.type_f128()]),
136
137 sym::ceilf16 => ("llvm.ceil", &[bx.type_f16()]),
138 sym::ceilf32 => ("llvm.ceil", &[bx.type_f32()]),
139 sym::ceilf64 => ("llvm.ceil", &[bx.type_f64()]),
140 sym::ceilf128 => ("llvm.ceil", &[bx.type_f128()]),
141
142 sym::truncf16 => ("llvm.trunc", &[bx.type_f16()]),
143 sym::truncf32 => ("llvm.trunc", &[bx.type_f32()]),
144 sym::truncf64 => ("llvm.trunc", &[bx.type_f64()]),
145 sym::truncf128 => ("llvm.trunc", &[bx.type_f128()]),
146
147 sym::round_ties_even_f16 => ("llvm.rint", &[bx.type_f16()]),
152 sym::round_ties_even_f32 => ("llvm.rint", &[bx.type_f32()]),
153 sym::round_ties_even_f64 => ("llvm.rint", &[bx.type_f64()]),
154 sym::round_ties_even_f128 => ("llvm.rint", &[bx.type_f128()]),
155
156 sym::roundf16 => ("llvm.round", &[bx.type_f16()]),
157 sym::roundf32 => ("llvm.round", &[bx.type_f32()]),
158 sym::roundf64 => ("llvm.round", &[bx.type_f64()]),
159 sym::roundf128 => ("llvm.round", &[bx.type_f128()]),
160
161 _ => return None,
162 };
163 Some(bx.call_intrinsic(
164 base_name,
165 type_params,
166 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
167 ))
168}
169
170impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
171 fn codegen_intrinsic_call(
172 &mut self,
173 instance: ty::Instance<'tcx>,
174 args: &[OperandRef<'tcx, &'ll Value>],
175 result: PlaceRef<'tcx, &'ll Value>,
176 span: Span,
177 ) -> Result<(), ty::Instance<'tcx>> {
178 let tcx = self.tcx;
179
180 let name = tcx.item_name(instance.def_id());
181 let fn_args = instance.args;
182
183 let simple = call_simple_intrinsic(self, name, args);
184 let llval = match name {
185 _ if simple.is_some() => simple.unwrap(),
186 sym::minimum_number_nsz_f16
187 | sym::minimum_number_nsz_f32
188 | sym::minimum_number_nsz_f64
189 | sym::minimum_number_nsz_f128
190 | sym::maximum_number_nsz_f16
191 | sym::maximum_number_nsz_f32
192 | sym::maximum_number_nsz_f64
193 | sym::maximum_number_nsz_f128
194 if crate::llvm_util::get_version() >= (22, 0, 0) =>
196 {
197 let intrinsic_name = if name.as_str().starts_with("min") {
198 "llvm.minimumnum"
199 } else {
200 "llvm.maximumnum"
201 };
202 let call = self.call_intrinsic(
203 intrinsic_name,
204 &[args[0].layout.immediate_llvm_type(self.cx)],
205 &[args[0].immediate(), args[1].immediate()],
206 );
207 unsafe { llvm::LLVMRustSetNoSignedZeros(call) };
210 call
211 }
212 sym::ptr_mask => {
213 let ptr = args[0].immediate();
214 self.call_intrinsic(
215 "llvm.ptrmask",
216 &[self.val_ty(ptr), self.type_isize()],
217 &[ptr, args[1].immediate()],
218 )
219 }
220 sym::autodiff => {
221 codegen_autodiff(self, tcx, instance, args, result);
222 return Ok(());
223 }
224 sym::offload => {
225 if tcx.sess.opts.unstable_opts.offload.is_empty() {
226 let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutEnable);
227 }
228
229 if tcx.sess.lto() != rustc_session::config::Lto::Fat {
230 let _ = tcx.dcx().emit_almost_fatal(OffloadWithoutFatLTO);
231 }
232
233 codegen_offload(self, tcx, instance, args);
234 return Ok(());
235 }
236 sym::is_val_statically_known => {
237 if let OperandValue::Immediate(imm) = args[0].val {
238 self.call_intrinsic(
239 "llvm.is.constant",
240 &[args[0].layout.immediate_llvm_type(self.cx)],
241 &[imm],
242 )
243 } else {
244 self.const_bool(false)
245 }
246 }
247 sym::select_unpredictable => {
248 let cond = args[0].immediate();
249 match (&args[1].layout, &args[2].layout) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(args[1].layout, args[2].layout);
250 let select = |bx: &mut Self, true_val, false_val| {
251 let result = bx.select(cond, true_val, false_val);
252 bx.set_unpredictable(&result);
253 result
254 };
255 match (args[1].val, args[2].val) {
256 (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => {
257 if !true_val.llextra.is_none() {
::core::panicking::panic("assertion failed: true_val.llextra.is_none()")
};assert!(true_val.llextra.is_none());
258 if !false_val.llextra.is_none() {
::core::panicking::panic("assertion failed: false_val.llextra.is_none()")
};assert!(false_val.llextra.is_none());
259 match (&true_val.align, &false_val.align) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val, &*right_val,
::core::option::Option::None);
}
}
};assert_eq!(true_val.align, false_val.align);
260 let ptr = select(self, true_val.llval, false_val.llval);
261 let selected =
262 OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align));
263 selected.store(self, result);
264 return Ok(());
265 }
266 (OperandValue::Immediate(_), OperandValue::Immediate(_))
267 | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => {
268 let true_val = args[1].immediate_or_packed_pair(self);
269 let false_val = args[2].immediate_or_packed_pair(self);
270 select(self, true_val, false_val)
271 }
272 (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()),
273 _ => ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("Incompatible OperandValue for select_unpredictable"))span_bug!(span, "Incompatible OperandValue for select_unpredictable"),
274 }
275 }
276 sym::catch_unwind => {
277 catch_unwind_intrinsic(
278 self,
279 args[0].immediate(),
280 args[1].immediate(),
281 args[2].immediate(),
282 result,
283 );
284 return Ok(());
285 }
286 sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[], &[]),
287 sym::va_arg => {
288 let BackendRepr::Scalar(scalar) = result.layout.backend_repr else {
289 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support non-scalar types"))bug!("the va_arg intrinsic does not support non-scalar types")
290 };
291
292 match scalar.primitive() {
293 Primitive::Pointer(_) => {
294 emit_va_arg(self, args[0], result.layout.ty)
296 }
297 Primitive::Int(..) => {
298 let int_width = self.cx().size_of(result.layout.ty).bits();
299 let target_c_int_width = self.cx().sess().target.options.c_int_width;
300 if int_width < u64::from(target_c_int_width) {
301 ::rustc_middle::util::bug::bug_fmt(format_args!("va_arg got i{0} but needs at least c_int (an i{1})",
int_width, target_c_int_width));bug!(
304 "va_arg got i{} but needs at least c_int (an i{})",
305 int_width,
306 target_c_int_width
307 );
308 }
309 emit_va_arg(self, args[0], result.layout.ty)
310 }
311 Primitive::Float(Float::F16) => {
312 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f16`"))bug!("the va_arg intrinsic does not support `f16`")
313 }
314 Primitive::Float(Float::F32) => {
315 if self.cx().sess().target.arch == Arch::Avr {
316 emit_va_arg(self, args[0], result.layout.ty)
318 } else {
319 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f32` on this target"))bug!("the va_arg intrinsic does not support `f32` on this target")
320 }
321 }
322 Primitive::Float(Float::F64) => {
323 emit_va_arg(self, args[0], result.layout.ty)
325 }
326 Primitive::Float(Float::F128) => {
327 ::rustc_middle::util::bug::bug_fmt(format_args!("the va_arg intrinsic does not support `f128`"))bug!("the va_arg intrinsic does not support `f128`")
328 }
329 }
330 }
331
332 sym::volatile_load | sym::unaligned_volatile_load => {
333 let ptr = args[0].immediate();
334 let load = self.volatile_load(result.layout.llvm_type(self), ptr);
335 let align = if name == sym::unaligned_volatile_load {
336 1
337 } else {
338 result.layout.align.bytes() as u32
339 };
340 unsafe {
341 llvm::LLVMSetAlignment(load, align);
342 }
343 if !result.layout.is_zst() {
344 self.store_to_place(load, result.val);
345 }
346 return Ok(());
347 }
348 sym::volatile_store => {
349 let dst = args[0].deref(self.cx());
350 args[1].val.volatile_store(self, dst);
351 return Ok(());
352 }
353 sym::unaligned_volatile_store => {
354 let dst = args[0].deref(self.cx());
355 args[1].val.unaligned_volatile_store(self, dst);
356 return Ok(());
357 }
358 sym::prefetch_read_data
359 | sym::prefetch_write_data
360 | sym::prefetch_read_instruction
361 | sym::prefetch_write_instruction => {
362 let (rw, cache_type) = match name {
363 sym::prefetch_read_data => (0, 1),
364 sym::prefetch_write_data => (1, 1),
365 sym::prefetch_read_instruction => (0, 0),
366 sym::prefetch_write_instruction => (1, 0),
367 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("impossible case reached"))bug!(),
368 };
369 let ptr = args[0].immediate();
370 let locality = fn_args.const_at(1).to_leaf().to_i32();
371 self.call_intrinsic(
372 "llvm.prefetch",
373 &[self.val_ty(ptr)],
374 &[
375 ptr,
376 self.const_i32(rw),
377 self.const_i32(locality),
378 self.const_i32(cache_type),
379 ],
380 )
381 }
382 sym::carrying_mul_add => {
383 let (size, signed) = fn_args.type_at(0).int_size_and_signed(self.tcx);
384
385 let wide_llty = self.type_ix(size.bits() * 2);
386 let args = args.as_array().unwrap();
387 let [a, b, c, d] = args.map(|a| self.intcast(a.immediate(), wide_llty, signed));
388
389 let wide = if signed {
390 let prod = self.unchecked_smul(a, b);
391 let acc = self.unchecked_sadd(prod, c);
392 self.unchecked_sadd(acc, d)
393 } else {
394 let prod = self.unchecked_umul(a, b);
395 let acc = self.unchecked_uadd(prod, c);
396 self.unchecked_uadd(acc, d)
397 };
398
399 let narrow_llty = self.type_ix(size.bits());
400 let low = self.trunc(wide, narrow_llty);
401 let bits_const = self.const_uint(wide_llty, size.bits());
402 let high = self.lshr(wide, bits_const);
404 let high = self.trunc(high, narrow_llty);
406
407 let pair_llty = self.type_struct(&[narrow_llty, narrow_llty], false);
408 let pair = self.const_poison(pair_llty);
409 let pair = self.insert_value(pair, low, 0);
410 let pair = self.insert_value(pair, high, 1);
411 pair
412 }
413
414 sym::carryless_mul if crate::llvm_util::get_version() >= (22, 0, 0) => {
416 let ty = args[0].layout.ty;
417 if !ty.is_integral() {
418 tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
419 span,
420 name,
421 ty,
422 });
423 return Ok(());
424 }
425 let (size, _) = ty.int_size_and_signed(self.tcx);
426 let width = size.bits();
427 let llty = self.type_ix(width);
428
429 let lhs = args[0].immediate();
430 let rhs = args[1].immediate();
431 self.call_intrinsic("llvm.clmul", &[llty], &[lhs, rhs])
432 }
433
434 sym::ctlz
435 | sym::ctlz_nonzero
436 | sym::cttz
437 | sym::cttz_nonzero
438 | sym::ctpop
439 | sym::bswap
440 | sym::bitreverse
441 | sym::saturating_add
442 | sym::saturating_sub
443 | sym::unchecked_funnel_shl
444 | sym::unchecked_funnel_shr => {
445 let ty = args[0].layout.ty;
446 if !ty.is_integral() {
447 tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
448 span,
449 name,
450 ty,
451 });
452 return Ok(());
453 }
454 let (size, signed) = ty.int_size_and_signed(self.tcx);
455 let width = size.bits();
456 let llty = self.type_ix(width);
457 match name {
458 sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => {
459 let y =
460 self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero);
461 let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero {
462 "llvm.ctlz"
463 } else {
464 "llvm.cttz"
465 };
466 let ret =
467 self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]);
468 self.intcast(ret, result.layout.llvm_type(self), false)
469 }
470 sym::ctpop => {
471 let ret =
472 self.call_intrinsic("llvm.ctpop", &[llty], &[args[0].immediate()]);
473 self.intcast(ret, result.layout.llvm_type(self), false)
474 }
475 sym::bswap => {
476 if width == 8 {
477 args[0].immediate() } else {
479 self.call_intrinsic("llvm.bswap", &[llty], &[args[0].immediate()])
480 }
481 }
482 sym::bitreverse => {
483 self.call_intrinsic("llvm.bitreverse", &[llty], &[args[0].immediate()])
484 }
485 sym::unchecked_funnel_shl | sym::unchecked_funnel_shr => {
486 let is_left = name == sym::unchecked_funnel_shl;
487 let lhs = args[0].immediate();
488 let rhs = args[1].immediate();
489 let raw_shift = args[2].immediate();
490 let llvm_name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.fsh{0}",
if is_left { 'l' } else { 'r' }))
})format!("llvm.fsh{}", if is_left { 'l' } else { 'r' });
491
492 let raw_shift = self.intcast(raw_shift, self.val_ty(lhs), false);
495
496 self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs, raw_shift])
497 }
498 sym::saturating_add | sym::saturating_sub => {
499 let is_add = name == sym::saturating_add;
500 let lhs = args[0].immediate();
501 let rhs = args[1].immediate();
502 let llvm_name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.{0}{1}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" }))
})format!(
503 "llvm.{}{}.sat",
504 if signed { 's' } else { 'u' },
505 if is_add { "add" } else { "sub" },
506 );
507 self.call_intrinsic(llvm_name, &[llty], &[lhs, rhs])
508 }
509 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("impossible case reached"))bug!(),
510 }
511 }
512
513 sym::fabs => {
514 let ty = args[0].layout.ty;
515 let ty::Float(f) = ty.kind() else {
516 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("the `fabs` intrinsic requires a floating-point argument, got {0:?}",
ty));span_bug!(span, "the `fabs` intrinsic requires a floating-point argument, got {:?}", ty);
517 };
518 let llty = self.type_float_from_ty(*f);
519 let llvm_name = "llvm.fabs";
520 self.call_intrinsic(
521 llvm_name,
522 &[llty],
523 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
524 )
525 }
526
527 sym::raw_eq => {
528 use BackendRepr::*;
529 let tp_ty = fn_args.type_at(0);
530 let layout = self.layout_of(tp_ty).layout;
531 let use_integer_compare = match layout.backend_repr() {
532 Scalar(_) | ScalarPair(_, _) => true,
533 SimdVector { .. } => false,
534 SimdScalableVector { .. } => {
535 tcx.dcx().emit_err(InvalidMonomorphization::NonScalableType {
536 span,
537 name: sym::raw_eq,
538 ty: tp_ty,
539 });
540 return Ok(());
541 }
542 Memory { .. } => {
543 layout.size() <= self.data_layout().pointer_size() * 2
547 }
548 };
549
550 let a = args[0].immediate();
551 let b = args[1].immediate();
552 if layout.size().bytes() == 0 {
553 self.const_bool(true)
554 } else if use_integer_compare {
555 let integer_ty = self.type_ix(layout.size().bits());
556 let a_val = self.load(integer_ty, a, layout.align().abi);
557 let b_val = self.load(integer_ty, b, layout.align().abi);
558 self.icmp(IntPredicate::IntEQ, a_val, b_val)
559 } else {
560 let n = self.const_usize(layout.size().bytes());
561 let cmp = self.call_intrinsic("memcmp", &[], &[a, b, n]);
562 self.icmp(IntPredicate::IntEQ, cmp, self.const_int(self.type_int(), 0))
563 }
564 }
565
566 sym::compare_bytes => {
567 let cmp = self.call_intrinsic(
569 "memcmp",
570 &[],
571 &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
572 );
573 self.sext(cmp, self.type_ix(32))
575 }
576
577 sym::black_box => {
578 args[0].val.store(self, result);
579 let result_val_span = [result.val.llval];
580 let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
590 ("~{memory}", &[])
591 } else {
592 ("r,~{memory}", &result_val_span)
593 };
594 crate::asm::inline_asm_call(
595 self,
596 "",
597 constraint,
598 inputs,
599 self.type_void(),
600 &[],
601 true,
602 false,
603 llvm::AsmDialect::Att,
604 &[span],
605 false,
606 None,
607 None,
608 )
609 .unwrap_or_else(|| ::rustc_middle::util::bug::bug_fmt(format_args!("failed to generate inline asm call for `black_box`"))bug!("failed to generate inline asm call for `black_box`"));
610
611 return Ok(());
613 }
614
615 sym::amdgpu_dispatch_ptr => {
616 let val = self.call_intrinsic("llvm.amdgcn.dispatch.ptr", &[], &[]);
617 self.pointercast(val, self.type_ptr())
619 }
620
621 sym::sve_tuple_create2 => {
622 match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
};assert_matches!(
623 self.layout_of(fn_args.type_at(0)).backend_repr,
624 BackendRepr::SimdScalableVector {
625 number_of_vectors: NumScalableVectors(1),
626 ..
627 }
628 );
629 let tuple_ty = self.layout_of(fn_args.type_at(1));
630 match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2), .. }",
::core::option::Option::None);
}
};assert_matches!(
631 tuple_ty.backend_repr,
632 BackendRepr::SimdScalableVector {
633 number_of_vectors: NumScalableVectors(2),
634 ..
635 }
636 );
637 let ret = self.const_poison(self.backend_type(tuple_ty));
638 let ret = self.insert_value(ret, args[0].immediate(), 0);
639 self.insert_value(ret, args[1].immediate(), 1)
640 }
641
642 sym::sve_tuple_create3 => {
643 match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
};assert_matches!(
644 self.layout_of(fn_args.type_at(0)).backend_repr,
645 BackendRepr::SimdScalableVector {
646 number_of_vectors: NumScalableVectors(1),
647 ..
648 }
649 );
650 let tuple_ty = self.layout_of(fn_args.type_at(1));
651 match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(3), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(3), .. }",
::core::option::Option::None);
}
};assert_matches!(
652 tuple_ty.backend_repr,
653 BackendRepr::SimdScalableVector {
654 number_of_vectors: NumScalableVectors(3),
655 ..
656 }
657 );
658 let ret = self.const_poison(self.backend_type(tuple_ty));
659 let ret = self.insert_value(ret, args[0].immediate(), 0);
660 let ret = self.insert_value(ret, args[1].immediate(), 1);
661 self.insert_value(ret, args[2].immediate(), 2)
662 }
663
664 sym::sve_tuple_create4 => {
665 match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
};assert_matches!(
666 self.layout_of(fn_args.type_at(0)).backend_repr,
667 BackendRepr::SimdScalableVector {
668 number_of_vectors: NumScalableVectors(1),
669 ..
670 }
671 );
672 let tuple_ty = self.layout_of(fn_args.type_at(1));
673 match tuple_ty.backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(4), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(4), .. }",
::core::option::Option::None);
}
};assert_matches!(
674 tuple_ty.backend_repr,
675 BackendRepr::SimdScalableVector {
676 number_of_vectors: NumScalableVectors(4),
677 ..
678 }
679 );
680 let ret = self.const_poison(self.backend_type(tuple_ty));
681 let ret = self.insert_value(ret, args[0].immediate(), 0);
682 let ret = self.insert_value(ret, args[1].immediate(), 1);
683 let ret = self.insert_value(ret, args[2].immediate(), 2);
684 self.insert_value(ret, args[3].immediate(), 3)
685 }
686
687 sym::sve_tuple_get => {
688 match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }
=> {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }",
::core::option::Option::None);
}
};assert_matches!(
689 self.layout_of(fn_args.type_at(0)).backend_repr,
690 BackendRepr::SimdScalableVector {
691 number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
692 ..
693 }
694 );
695 match self.layout_of(fn_args.type_at(1)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
};assert_matches!(
696 self.layout_of(fn_args.type_at(1)).backend_repr,
697 BackendRepr::SimdScalableVector {
698 number_of_vectors: NumScalableVectors(1),
699 ..
700 }
701 );
702 self.extract_value(
703 args[0].immediate(),
704 fn_args.const_at(2).to_leaf().to_i32() as u64,
705 )
706 }
707
708 sym::sve_tuple_set => {
709 match self.layout_of(fn_args.type_at(0)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }
=> {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8), .. }",
::core::option::Option::None);
}
};assert_matches!(
710 self.layout_of(fn_args.type_at(0)).backend_repr,
711 BackendRepr::SimdScalableVector {
712 number_of_vectors: NumScalableVectors(2 | 3 | 4 | 5 | 6 | 7 | 8),
713 ..
714 }
715 );
716 match self.layout_of(fn_args.type_at(1)).backend_repr {
BackendRepr::SimdScalableVector {
number_of_vectors: NumScalableVectors(1), .. } => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"BackendRepr::SimdScalableVector\n{ number_of_vectors: NumScalableVectors(1), .. }",
::core::option::Option::None);
}
};assert_matches!(
717 self.layout_of(fn_args.type_at(1)).backend_repr,
718 BackendRepr::SimdScalableVector {
719 number_of_vectors: NumScalableVectors(1),
720 ..
721 }
722 );
723 self.insert_value(
724 args[0].immediate(),
725 args[1].immediate(),
726 fn_args.const_at(2).to_leaf().to_i32() as u64,
727 )
728 }
729
730 _ if name.as_str().starts_with("simd_") => {
731 let mut loaded_args = Vec::new();
734 for arg in args {
735 loaded_args.push(
736 if arg.layout.ty.is_simd()
741 && let OperandValue::Ref(place) = arg.val
742 {
743 let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
744 let elem_ll_ty = match elem_ty.kind() {
745 ty::Float(f) => self.type_float_from_ty(*f),
746 ty::Int(i) => self.type_int_from_ty(*i),
747 ty::Uint(u) => self.type_uint_from_ty(*u),
748 ty::RawPtr(_, _) => self.type_ptr(),
749 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
750 };
751 let loaded =
752 self.load_from_place(self.type_vector(elem_ll_ty, size), place);
753 OperandRef::from_immediate_or_packed_pair(self, loaded, arg.layout)
754 } else {
755 *arg
756 },
757 );
758 }
759
760 let llret_ty = if result.layout.ty.is_simd()
761 && let BackendRepr::Memory { .. } = result.layout.backend_repr
762 {
763 let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
764 let elem_ll_ty = match elem_ty.kind() {
765 ty::Float(f) => self.type_float_from_ty(*f),
766 ty::Int(i) => self.type_int_from_ty(*i),
767 ty::Uint(u) => self.type_uint_from_ty(*u),
768 ty::RawPtr(_, _) => self.type_ptr(),
769 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
770 };
771 self.type_vector(elem_ll_ty, size)
772 } else {
773 result.layout.llvm_type(self)
774 };
775
776 match generic_simd_intrinsic(
777 self,
778 name,
779 fn_args,
780 &loaded_args,
781 result.layout.ty,
782 llret_ty,
783 span,
784 ) {
785 Ok(llval) => llval,
786 Err(()) => return Ok(()),
789 }
790 }
791
792 _ => {
793 {
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/intrinsic.rs:793",
"rustc_codegen_llvm::intrinsic", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/intrinsic.rs"),
::tracing_core::__macro_support::Option::Some(793u32),
::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::intrinsic"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("unknown intrinsic \'{0}\' -- falling back to default body",
name) as &dyn Value))])
});
} else { ; }
};debug!("unknown intrinsic '{}' -- falling back to default body", name);
794 return Err(ty::Instance::new_raw(instance.def_id(), instance.args));
796 }
797 };
798
799 if result.layout.ty.is_bool() {
800 let val = self.from_immediate(llval);
801 self.store_to_place(val, result.val);
802 } else if !result.layout.ty.is_unit() {
803 self.store_to_place(llval, result.val);
804 }
805 Ok(())
806 }
807
808 fn codegen_llvm_intrinsic_call(
809 &mut self,
810 instance: ty::Instance<'tcx>,
811 args: &[OperandRef<'tcx, Self::Value>],
812 _is_cleanup: bool,
813 ) -> Self::Value {
814 let tcx = self.tcx();
815
816 let fn_ty = instance.ty(tcx, self.typing_env());
817 let fn_sig = match *fn_ty.kind() {
818 ty::FnDef(def_id, args) => {
819 tcx.instantiate_bound_regions_with_erased(tcx.fn_sig(def_id).instantiate(tcx, args))
820 }
821 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
822 };
823 if !!fn_sig.c_variadic {
::core::panicking::panic("assertion failed: !fn_sig.c_variadic")
};assert!(!fn_sig.c_variadic);
824
825 let ret_layout = self.layout_of(fn_sig.output());
826 let llreturn_ty = if ret_layout.is_zst() {
827 self.type_void()
828 } else {
829 ret_layout.immediate_llvm_type(self)
830 };
831
832 let mut llargument_tys = Vec::with_capacity(fn_sig.inputs().len());
833 for &arg in fn_sig.inputs() {
834 let arg_layout = self.layout_of(arg);
835 if arg_layout.is_zst() {
836 continue;
837 }
838 llargument_tys.push(arg_layout.immediate_llvm_type(self));
839 }
840
841 let fn_ptr = if let Some(&llfn) = self.intrinsic_instances.borrow().get(&instance) {
842 llfn
843 } else {
844 let sym = tcx.symbol_name(instance).name;
845
846 let llfn = if let Some(llfn) = self.get_declared_value(sym) {
847 llfn
848 } else {
849 intrinsic_fn(self, sym, llreturn_ty, llargument_tys, instance)
850 };
851
852 self.intrinsic_instances.borrow_mut().insert(instance, llfn);
853
854 llfn
855 };
856 let fn_ty = self.get_type_of_global(fn_ptr);
857
858 let mut llargs = ::alloc::vec::Vec::new()vec![];
859
860 for arg in args {
861 match arg.val {
862 OperandValue::ZeroSized => {}
863 OperandValue::Immediate(a) => llargs.push(a),
864 OperandValue::Pair(a, b) => {
865 llargs.push(a);
866 llargs.push(b);
867 }
868 OperandValue::Ref(op_place_val) => {
869 let mut llval = op_place_val.llval;
870 llval = self.load(self.backend_type(arg.layout), llval, op_place_val.align);
876 if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
877 if scalar.is_bool() {
878 self.range_metadata(llval, WrappingRange { start: 0, end: 1 });
879 }
880 llval = self.to_immediate_scalar(llval, scalar);
882 }
883 llargs.push(llval);
884 }
885 }
886 }
887
888 {
use ::tracing::__macro_support::Callsite as _;
static __CALLSITE: ::tracing::callsite::DefaultCallsite =
{
static META: ::tracing::Metadata<'static> =
{
::tracing_core::metadata::Metadata::new("event compiler/rustc_codegen_llvm/src/intrinsic.rs:888",
"rustc_codegen_llvm::intrinsic", ::tracing::Level::DEBUG,
::tracing_core::__macro_support::Option::Some("compiler/rustc_codegen_llvm/src/intrinsic.rs"),
::tracing_core::__macro_support::Option::Some(888u32),
::tracing_core::__macro_support::Option::Some("rustc_codegen_llvm::intrinsic"),
::tracing_core::field::FieldSet::new(&["message"],
::tracing_core::callsite::Identifier(&__CALLSITE)),
::tracing::metadata::Kind::EVENT)
};
::tracing::callsite::DefaultCallsite::new(&META)
};
let enabled =
::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
&&
::tracing::Level::DEBUG <=
::tracing::level_filters::LevelFilter::current() &&
{
let interest = __CALLSITE.interest();
!interest.is_never() &&
::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
interest)
};
if enabled {
(|value_set: ::tracing::field::ValueSet|
{
let meta = __CALLSITE.metadata();
::tracing::Event::dispatch(meta, &value_set);
;
})({
#[allow(unused_imports)]
use ::tracing::field::{debug, display, Value};
let mut iter = __CALLSITE.metadata().fields().iter();
__CALLSITE.metadata().fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
::tracing::__macro_support::Option::Some(&format_args!("call intrinsic {0:?} with args ({1:?})",
instance, llargs) as &dyn Value))])
});
} else { ; }
};debug!("call intrinsic {:?} with args ({:?})", instance, llargs);
889
890 for (dest_ty, arg) in iter::zip(self.func_params_types(fn_ty), &mut llargs) {
891 let src_ty = self.val_ty(arg);
892 if !can_autocast(self, src_ty, dest_ty) {
{
::core::panicking::panic_fmt(format_args!("Cannot match `{0:?}` (expected) with {1:?} (found) in `{2:?}",
dest_ty, src_ty, fn_ptr));
}
};assert!(
893 can_autocast(self, src_ty, dest_ty),
894 "Cannot match `{dest_ty:?}` (expected) with {src_ty:?} (found) in `{fn_ptr:?}"
895 );
896
897 *arg = autocast(self, arg, src_ty, dest_ty);
898 }
899
900 let llret = unsafe {
901 llvm::LLVMBuildCallWithOperandBundles(
902 self.llbuilder,
903 fn_ty,
904 fn_ptr,
905 llargs.as_ptr(),
906 llargs.len() as c_uint,
907 ptr::dangling(),
908 0,
909 c"".as_ptr(),
910 )
911 };
912
913 let src_ty = self.val_ty(llret);
914 let dest_ty = llreturn_ty;
915 if !can_autocast(self, dest_ty, src_ty) {
{
::core::panicking::panic_fmt(format_args!("Cannot match `{0:?}` (expected) with `{1:?}` (found) in `{2:?}`",
src_ty, dest_ty, fn_ptr));
}
};assert!(
916 can_autocast(self, dest_ty, src_ty),
917 "Cannot match `{src_ty:?}` (expected) with `{dest_ty:?}` (found) in `{fn_ptr:?}`"
918 );
919
920 autocast(self, llret, src_ty, dest_ty)
921 }
922
    /// Codegens an abort: `llvm.trap` terminates execution immediately.
    fn abort(&mut self) {
        self.call_intrinsic("llvm.trap", &[], &[]);
    }
926
    /// Emits `llvm.assume(val)` as an optimizer hint that `val` holds.
    /// Skipped entirely at `-O0`, where the hint would only cost compile time.
    fn assume(&mut self, val: Self::Value) {
        if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
            self.call_intrinsic("llvm.assume", &[], &[val]);
        }
    }
932
933 fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
934 if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No {
935 self.call_intrinsic(
936 "llvm.expect",
937 &[self.type_i1()],
938 &[cond, self.const_bool(expected)],
939 )
940 } else {
941 cond
942 }
943 }
944
    /// Loads a pointer from a vtable while checking it against `typeid`, via
    /// the `llvm.type.checked.load` intrinsic (used for CFI/vtable checks).
    ///
    /// Returns only the loaded pointer (field 0 of the intrinsic's aggregate
    /// result); the "check passed" bit (field 1) is not extracted here.
    fn type_checked_load(
        &mut self,
        llvtable: &'ll Value,
        vtable_byte_offset: u64,
        typeid: &[u8],
    ) -> Self::Value {
        let typeid = self.create_metadata(typeid);
        let typeid = self.get_metadata_value(typeid);
        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
        let type_checked_load = self.call_intrinsic(
            "llvm.type.checked.load",
            &[],
            &[llvtable, vtable_byte_offset, typeid],
        );
        self.extract_value(type_checked_load, 0)
    }
961
    /// Codegens `llvm.va_start`, initializing `va_list` for variadic access.
    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_start", &[self.val_ty(va_list)], &[va_list])
    }
965
    /// Codegens `llvm.va_end`, releasing the given `va_list`.
    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        self.call_intrinsic("llvm.va_end", &[self.val_ty(va_list)], &[va_list])
    }
969}
970
971fn llvm_arch_for(rust_arch: &Arch) -> Option<&'static str> {
972 Some(match rust_arch {
973 Arch::AArch64 | Arch::Arm64EC => "aarch64",
974 Arch::AmdGpu => "amdgcn",
975 Arch::Arm => "arm",
976 Arch::Bpf => "bpf",
977 Arch::Hexagon => "hexagon",
978 Arch::LoongArch32 | Arch::LoongArch64 => "loongarch",
979 Arch::Mips | Arch::Mips32r6 | Arch::Mips64 | Arch::Mips64r6 => "mips",
980 Arch::Nvptx64 => "nvvm",
981 Arch::PowerPC | Arch::PowerPC64 => "ppc",
982 Arch::RiscV32 | Arch::RiscV64 => "riscv",
983 Arch::S390x => "s390",
984 Arch::SpirV => "spv",
985 Arch::Wasm32 | Arch::Wasm64 => "wasm",
986 Arch::X86 | Arch::X86_64 => "x86",
987 _ => return None, })
989}
990
991fn can_autocast<'ll>(cx: &CodegenCx<'ll, '_>, rust_ty: &'ll Type, llvm_ty: &'ll Type) -> bool {
992 if rust_ty == llvm_ty {
993 return true;
994 }
995
996 match cx.type_kind(llvm_ty) {
997 TypeKind::Struct if cx.type_kind(rust_ty) == TypeKind::Struct => {
1001 let rust_element_tys = cx.struct_element_types(rust_ty);
1002 let llvm_element_tys = cx.struct_element_types(llvm_ty);
1003
1004 if rust_element_tys.len() != llvm_element_tys.len() {
1005 return false;
1006 }
1007
1008 iter::zip(rust_element_tys, llvm_element_tys).all(
1009 |(rust_element_ty, llvm_element_ty)| {
1010 can_autocast(cx, rust_element_ty, llvm_element_ty)
1011 },
1012 )
1013 }
1014 TypeKind::Vector => {
1015 let llvm_element_ty = cx.element_type(llvm_ty);
1016 let element_count = cx.vector_length(llvm_ty) as u64;
1017
1018 if llvm_element_ty == cx.type_bf16() {
1019 rust_ty == cx.type_vector(cx.type_i16(), element_count)
1020 } else if llvm_element_ty == cx.type_i1() {
1021 let int_width = element_count.next_power_of_two().max(8);
1022 rust_ty == cx.type_ix(int_width)
1023 } else {
1024 false
1025 }
1026 }
1027 TypeKind::BFloat => rust_ty == cx.type_i16(),
1028 _ => false,
1029 }
1030}
1031
1032fn autocast<'ll>(
1033 bx: &mut Builder<'_, 'll, '_>,
1034 val: &'ll Value,
1035 src_ty: &'ll Type,
1036 dest_ty: &'ll Type,
1037) -> &'ll Value {
1038 if src_ty == dest_ty {
1039 return val;
1040 }
1041 match (bx.type_kind(src_ty), bx.type_kind(dest_ty)) {
1042 (TypeKind::Struct, TypeKind::Struct) => {
1044 let mut ret = bx.const_poison(dest_ty);
1045 for (idx, (src_element_ty, dest_element_ty)) in
1046 iter::zip(bx.struct_element_types(src_ty), bx.struct_element_types(dest_ty))
1047 .enumerate()
1048 {
1049 let elt = bx.extract_value(val, idx as u64);
1050 let casted_elt = autocast(bx, elt, src_element_ty, dest_element_ty);
1051 ret = bx.insert_value(ret, casted_elt, idx as u64);
1052 }
1053 ret
1054 }
1055 (TypeKind::Vector, TypeKind::Integer) if bx.element_type(src_ty) == bx.type_i1() => {
1057 let vector_length = bx.vector_length(src_ty) as u64;
1058 let int_width = vector_length.next_power_of_two().max(8);
1059
1060 let val = if vector_length == int_width {
1061 val
1062 } else {
1063 let shuffle_indices = match vector_length {
1065 0 => {
::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
format_args!("zero length vectors are not allowed")));
}unreachable!("zero length vectors are not allowed"),
1066 1 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 1, 1, 1, 1, 1, 1]))vec![0, 1, 1, 1, 1, 1, 1, 1],
1067 2 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 2, 2, 2, 2, 2, 2]))vec![0, 1, 2, 2, 2, 2, 2, 2],
1068 3 => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[0, 1, 2, 3, 3, 3, 3, 3]))vec![0, 1, 2, 3, 3, 3, 3, 3],
1069 4.. => (0..int_width as i32).collect(),
1070 };
1071 let shuffle_mask =
1072 shuffle_indices.into_iter().map(|i| bx.const_i32(i)).collect::<Vec<_>>();
1073 bx.shuffle_vector(val, bx.const_null(src_ty), bx.const_vector(&shuffle_mask))
1074 };
1075 bx.bitcast(val, dest_ty)
1076 }
1077 (TypeKind::Integer, TypeKind::Vector) if bx.element_type(dest_ty) == bx.type_i1() => {
1079 let vector_length = bx.vector_length(dest_ty) as u64;
1080 let int_width = vector_length.next_power_of_two().max(8);
1081
1082 let intermediate_ty = bx.type_vector(bx.type_i1(), int_width);
1083 let intermediate = bx.bitcast(val, intermediate_ty);
1084
1085 if vector_length == int_width {
1086 intermediate
1087 } else {
1088 let shuffle_mask: Vec<_> =
1089 (0..vector_length).map(|i| bx.const_i32(i as i32)).collect();
1090 bx.shuffle_vector(
1091 intermediate,
1092 bx.const_poison(intermediate_ty),
1093 bx.const_vector(&shuffle_mask),
1094 )
1095 }
1096 }
1097 _ => bx.bitcast(val, dest_ty), }
1099}
1100
1101fn intrinsic_fn<'ll, 'tcx>(
1102 bx: &Builder<'_, 'll, 'tcx>,
1103 name: &str,
1104 rust_return_ty: &'ll Type,
1105 rust_argument_tys: Vec<&'ll Type>,
1106 instance: ty::Instance<'tcx>,
1107) -> &'ll Value {
1108 let tcx = bx.tcx;
1109
1110 let rust_fn_ty = bx.type_func(&rust_argument_tys, rust_return_ty);
1111
1112 let intrinsic = llvm::Intrinsic::lookup(name.as_bytes());
1113
1114 if let Some(intrinsic) = intrinsic
1115 && intrinsic.is_target_specific()
1116 {
1117 let (llvm_arch, _) = name[5..].split_once('.').unwrap();
1118 let rust_arch = &tcx.sess.target.arch;
1119
1120 if let Some(correct_llvm_arch) = llvm_arch_for(rust_arch)
1121 && llvm_arch != correct_llvm_arch
1122 {
1123 tcx.dcx().emit_fatal(IntrinsicWrongArch {
1124 name,
1125 target_arch: rust_arch.desc(),
1126 span: tcx.def_span(instance.def_id()),
1127 });
1128 }
1129 }
1130
1131 if let Some(intrinsic) = intrinsic
1132 && !intrinsic.is_overloaded()
1133 {
1134 let llfn = intrinsic.get_declaration(bx.llmod, &[]);
1136 let llvm_fn_ty = bx.get_type_of_global(llfn);
1137
1138 let llvm_return_ty = bx.get_return_type(llvm_fn_ty);
1139 let llvm_argument_tys = bx.func_params_types(llvm_fn_ty);
1140 let llvm_is_variadic = bx.func_is_variadic(llvm_fn_ty);
1141
1142 let is_correct_signature = !llvm_is_variadic
1143 && rust_argument_tys.len() == llvm_argument_tys.len()
1144 && iter::once((rust_return_ty, llvm_return_ty))
1145 .chain(iter::zip(rust_argument_tys, llvm_argument_tys))
1146 .all(|(rust_ty, llvm_ty)| can_autocast(bx, rust_ty, llvm_ty));
1147
1148 if !is_correct_signature {
1149 tcx.dcx().emit_fatal(IntrinsicSignatureMismatch {
1150 name,
1151 llvm_fn_ty: &::alloc::__export::must_use({
::alloc::fmt::format(format_args!("{0:?}", llvm_fn_ty))
})format!("{llvm_fn_ty:?}"),
1152 rust_fn_ty: &::alloc::__export::must_use({
::alloc::fmt::format(format_args!("{0:?}", rust_fn_ty))
})format!("{rust_fn_ty:?}"),
1153 span: tcx.def_span(instance.def_id()),
1154 });
1155 }
1156
1157 return llfn;
1158 }
1159
1160 let llfn = declare_raw_fn(
1162 bx,
1163 name,
1164 llvm::CCallConv,
1165 llvm::UnnamedAddr::Global,
1166 llvm::Visibility::Default,
1167 rust_fn_ty,
1168 );
1169
1170 if intrinsic.is_none() {
1171 let mut new_llfn = None;
1172 let can_upgrade = unsafe { llvm::LLVMRustUpgradeIntrinsicFunction(llfn, &mut new_llfn) };
1173
1174 if !can_upgrade {
1175 tcx.dcx().emit_fatal(UnknownIntrinsic { name, span: tcx.def_span(instance.def_id()) });
1177 } else if let Some(def_id) = instance.def_id().as_local() {
1178 let hir_id = tcx.local_def_id_to_hir_id(def_id);
1180
1181 let msg = if let Some(new_llfn) = new_llfn {
1183 ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("using deprecated intrinsic `{1}`, `{0}` can be used instead",
str::from_utf8(&llvm::get_value_name(new_llfn)).unwrap(),
name))
})format!(
1184 "using deprecated intrinsic `{name}`, `{}` can be used instead",
1185 str::from_utf8(&llvm::get_value_name(new_llfn)).unwrap()
1186 )
1187 } else {
1188 ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("using deprecated intrinsic `{0}`",
name))
})format!("using deprecated intrinsic `{name}`")
1189 };
1190
1191 tcx.emit_node_lint(
1192 DEPRECATED_LLVM_INTRINSIC,
1193 hir_id,
1194 rustc_errors::DiagDecorator(|d| {
1195 d.primary_message(msg).span(tcx.hir_span(hir_id));
1196 }),
1197 );
1198 }
1199 }
1200
1201 llfn
1202}
1203
// Dispatches the `catch_unwind` intrinsic to the unwinding scheme of the
// current target: a plain call when panics cannot unwind, MSVC SEH, wasm EH,
// Emscripten's C++-based unwinding, or the default GNU-style landing pads.
fn catch_unwind_intrinsic<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    if !bx.sess().panic_strategy().unwinds() {
        // Nothing can unwind out of `try_func`: call it directly and store
        // 0 ("no panic caught") into the destination.
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else if wants_wasm_eh(bx.sess()) {
        codegen_wasm_try(bx, try_func, data, catch_func, dest);
    } else if bx.sess().target.os == Os::Emscripten {
        codegen_emcc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}
1227
// Codegens the `try` intrinsic for MSVC-style funclet-based exception
// handling: builds (once, via `get_rust_try_fn`) a shim that invokes
// `try_func(data)` and routes unwinds through a `catchswitch` with one
// catchpad for Rust panics and one catch-all for foreign exceptions.
fn codegen_msvc_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
        let caught = bx.append_sibling_block("caught");

        // The shim's own parameters (shadowing the outer values on purpose).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // Stack slot the Rust catchpad writes the exception pointer into.
        let ptr_size = bx.tcx().data_layout.pointer_size();
        let ptr_align = bx.tcx().data_layout.pointer_align().abi;
        let slot = bx.alloca(ptr_size, ptr_align);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        // No unwind: report success (0).
        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);

        // Build a C++-style `type_info` for the `rust_panic` exception type so
        // the Rust catchpad matches only Rust panics. `??_7type_info@@6B@` is
        // the MSVC-mangled name of type_info's vtable.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
        let tydesc = bx.declare_global(
            &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"),
            bx.val_ty(type_info),
        );

        // linkonce_odr + comdat: every crate can emit this descriptor and the
        // linker deduplicates the copies.
        llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
        if bx.cx.tcx.sess.target.supports_comdat() {
            llvm::SetUniqueComdat(bx.llmod, tydesc);
        }
        llvm::set_initializer(tydesc, type_info);

        // Rust panic path: catch into `slot` (flag value 8 — presumably
        // MSVC's catch-by-reference adjective; confirm against the
        // _CxxFrameHandler documentation), then hand the pointer to
        // `catch_func`.
        bx.switch_to_block(catchpad_rust);
        let flags = bx.const_i32(8);
        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // Foreign exception path: null typeinfo acts as a catch-all (flag
        // value 64 — NOTE(review): meaning taken on faith; verify), and a
        // null payload pointer is passed to `catch_func`.
        bx.switch_to_block(catchpad_foreign);
        let flags = bx.const_i32(64);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null, flags, null]);
        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // Either catchpad lands here: report that a panic was caught (1).
        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Call the shim and store its 0/1 result into the destination.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}
1382
// Codegens the `try` intrinsic for wasm exception handling, which uses the
// funclet instructions (`catchswitch`/`catchpad`) but fetches the exception
// through the wasm-specific EH intrinsics.
fn codegen_wasm_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());

        let normal = bx.append_sibling_block("normal");
        let catchswitch = bx.append_sibling_block("catchswitch");
        let catchpad = bx.append_sibling_block("catchpad");
        let caught = bx.append_sibling_block("caught");

        // The shim's own parameters (shadowing the outer values on purpose).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);

        // No unwind: report success (0).
        bx.switch_to_block(normal);
        bx.ret(bx.const_i32(0));

        bx.switch_to_block(catchswitch);
        let cs = bx.catch_switch(None, None, &[catchpad]);

        // Single catchpad with a null clause; the exception pointer and
        // selector come from the wasm EH intrinsics (the selector is
        // retrieved but unused here).
        bx.switch_to_block(catchpad);
        let null = bx.const_null(bx.type_ptr());
        let funclet = bx.catch_pad(cs, &[null]);

        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[], &[funclet.cleanuppad()]);
        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[], &[funclet.cleanuppad()]);

        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
        bx.catch_ret(&funclet, caught);

        // A panic was caught: report 1.
        bx.switch_to_block(caught);
        bx.ret(bx.const_i32(1));
    });

    // Call the shim and store its 0/1 result into the destination.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}
1455
// Codegens the `try` intrinsic for GNU-style (landing-pad based) exception
// handling, the default scheme used on most non-MSVC, non-wasm targets.
fn codegen_gnu_try<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
        let then = bx.append_sibling_block("then");
        let catch = bx.append_sibling_block("catch");

        // The shim's own parameters (shadowing the outer values on purpose).
        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);

        // No unwind: report success (0).
        bx.switch_to_block(then);
        bx.ret(bx.const_i32(0));

        // Landing pad with a single null (catch-all) clause: extract the
        // exception pointer from the { ptr, i32 } pad result and hand it to
        // `catch_func`.
        bx.switch_to_block(catch);
        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = bx.const_null(bx.type_ptr());
        bx.add_clause(vals, tydesc);
        let ptr = bx.extract_value(vals, 0);
        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
        bx.ret(bx.const_i32(1));
    });

    // Call the shim and store its 0/1 result into the destination.
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
    OperandValue::Immediate(ret).store(bx, dest);
}
1521
1522fn codegen_emcc_try<'ll, 'tcx>(
1526 bx: &mut Builder<'_, 'll, 'tcx>,
1527 try_func: &'ll Value,
1528 data: &'ll Value,
1529 catch_func: &'ll Value,
1530 dest: PlaceRef<'tcx, &'ll Value>,
1531) {
1532 let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
1533 let then = bx.append_sibling_block("then");
1551 let catch = bx.append_sibling_block("catch");
1552
1553 let try_func = llvm::get_param(bx.llfn(), 0);
1554 let data = llvm::get_param(bx.llfn(), 1);
1555 let catch_func = llvm::get_param(bx.llfn(), 2);
1556 let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
1557 bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
1558
1559 bx.switch_to_block(then);
1560 bx.ret(bx.const_i32(0));
1561
1562 bx.switch_to_block(catch);
1568 let tydesc = bx.eh_catch_typeinfo();
1569 let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
1570 let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
1571 bx.add_clause(vals, tydesc);
1572 bx.add_clause(vals, bx.const_null(bx.type_ptr()));
1573 let ptr = bx.extract_value(vals, 0);
1574 let selector = bx.extract_value(vals, 1);
1575
1576 let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[bx.val_ty(tydesc)], &[tydesc]);
1578 let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
1579 let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
1580
1581 let ptr_size = bx.tcx().data_layout.pointer_size();
1584 let ptr_align = bx.tcx().data_layout.pointer_align().abi;
1585 let i8_align = bx.tcx().data_layout.i8_align;
1586 if !(i8_align <= ptr_align) {
::core::panicking::panic("assertion failed: i8_align <= ptr_align")
};assert!(i8_align <= ptr_align);
1588 let catch_data = bx.alloca(2 * ptr_size, ptr_align);
1589 bx.store(ptr, catch_data, ptr_align);
1590 let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
1591 bx.store(is_rust_panic, catch_data_1, i8_align);
1592
1593 let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
1594 bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
1595 bx.ret(bx.const_i32(1));
1596 });
1597
1598 let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
1601 OperandValue::Immediate(ret).store(bx, dest);
1602}
1603
1604fn gen_fn<'a, 'll, 'tcx>(
1607 cx: &'a CodegenCx<'ll, 'tcx>,
1608 name: &str,
1609 rust_fn_sig: ty::PolyFnSig<'tcx>,
1610 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1611) -> (&'ll Type, &'ll Value) {
1612 let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
1613 let llty = fn_abi.llvm_type(cx);
1614 let llfn = cx.declare_fn(name, fn_abi, None);
1615 cx.set_frame_pointer_type(llfn);
1616 cx.apply_target_cpu_attr(llfn);
1617 llvm::set_linkage(llfn, llvm::Linkage::InternalLinkage);
1619 let llbb = Builder::append_block(cx, llfn, "entry-block");
1620 let bx = Builder::build(cx, llbb);
1621 codegen(bx);
1622 (llty, llfn)
1623}
1624
1625fn get_rust_try_fn<'a, 'll, 'tcx>(
1630 cx: &'a CodegenCx<'ll, 'tcx>,
1631 codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>),
1632) -> (&'ll Type, &'ll Value) {
1633 if let Some(llfn) = cx.rust_try_fn.get() {
1634 return llfn;
1635 }
1636
1637 let tcx = cx.tcx;
1639 let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
1640 let try_fn_ty = Ty::new_fn_ptr(
1642 tcx,
1643 ty::Binder::dummy(tcx.mk_fn_sig(
1644 [i8p],
1645 tcx.types.unit,
1646 false,
1647 hir::Safety::Unsafe,
1648 ExternAbi::Rust,
1649 )),
1650 );
1651 let catch_fn_ty = Ty::new_fn_ptr(
1653 tcx,
1654 ty::Binder::dummy(tcx.mk_fn_sig(
1655 [i8p, i8p],
1656 tcx.types.unit,
1657 false,
1658 hir::Safety::Unsafe,
1659 ExternAbi::Rust,
1660 )),
1661 );
1662 let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
1664 [try_fn_ty, i8p, catch_fn_ty],
1665 tcx.types.i32,
1666 false,
1667 hir::Safety::Unsafe,
1668 ExternAbi::Rust,
1669 ));
1670 let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
1671 cx.rust_try_fn.set(Some(rust_try));
1672 rust_try
1673}
1674
1675fn codegen_autodiff<'ll, 'tcx>(
1676 bx: &mut Builder<'_, 'll, 'tcx>,
1677 tcx: TyCtxt<'tcx>,
1678 instance: ty::Instance<'tcx>,
1679 args: &[OperandRef<'tcx, &'ll Value>],
1680 result: PlaceRef<'tcx, &'ll Value>,
1681) {
1682 if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) {
1683 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable);
1684 }
1685
1686 let ct = tcx.crate_types();
1687 let lto = tcx.sess.lto();
1688 if ct.len() == 1 && ct.contains(&CrateType::Executable) {
1689 if lto != rustc_session::config::Lto::Fat {
1690 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
1691 }
1692 } else {
1693 if lto != rustc_session::config::Lto::Fat && !tcx.sess.opts.cg.linker_plugin_lto.enabled() {
1694 let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutLto);
1695 }
1696 }
1697
1698 let fn_args = instance.args;
1699 let callee_ty = instance.ty(tcx, bx.typing_env());
1700
1701 let sig = callee_ty.fn_sig(tcx).skip_binder();
1702
1703 let ret_ty = sig.output();
1704 let llret_ty = bx.layout_of(ret_ty).llvm_type(bx);
1705
1706 let source_fn_ptr_ty = fn_args.into_type_list(tcx)[0];
1707 let fn_to_diff = args[0].immediate();
1708
1709 let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() {
1710 ty::FnDef(def_id, diff_args) => (def_id, diff_args),
1711 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("invalid args"))bug!("invalid args"),
1712 };
1713
1714 let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) {
1715 Ok(Some(instance)) => instance,
1716 Ok(None) => ::rustc_middle::util::bug::bug_fmt(format_args!("could not resolve ({0:?}, {1:?}) to a specific autodiff instance",
diff_id, diff_args))bug!(
1717 "could not resolve ({:?}, {:?}) to a specific autodiff instance",
1718 diff_id,
1719 diff_args
1720 ),
1721 Err(_) => {
1722 return;
1724 }
1725 };
1726
1727 let val_arr = get_args_from_tuple(bx, args[2], fn_diff);
1728 let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff.clone(), LOCAL_CRATE);
1729
1730 let Some(Some(mut diff_attrs)) =
1731 {
{
'done:
{
for i in
::rustc_hir::attrs::HasAttrs::get_attrs(fn_diff.def_id(),
&tcx) {
#[allow(unused_imports)]
use rustc_hir::attrs::AttributeKind::*;
let i: &rustc_hir::Attribute = i;
match i {
rustc_hir::Attribute::Parsed(RustcAutodiff(attr)) => {
break 'done Some(attr.clone());
}
rustc_hir::Attribute::Unparsed(..) =>
{}
#[deny(unreachable_patterns)]
_ => {}
}
}
None
}
}
}find_attr!(tcx, fn_diff.def_id(), RustcAutodiff(attr) => attr.clone())
1732 else {
1733 ::rustc_middle::util::bug::bug_fmt(format_args!("could not find autodiff attrs"))bug!("could not find autodiff attrs")
1734 };
1735
1736 adjust_activity_to_abi(
1737 tcx,
1738 source_fn_ptr_ty,
1739 TypingEnv::fully_monomorphized(),
1740 &mut diff_attrs.input_activity,
1741 );
1742
1743 let fnc_tree = rustc_middle::ty::fnc_typetrees(tcx, source_fn_ptr_ty);
1744
1745 generate_enzyme_call(
1747 bx,
1748 bx.cx,
1749 fn_to_diff,
1750 &diff_symbol,
1751 llret_ty,
1752 &val_arr,
1753 &diff_attrs,
1754 result,
1755 fnc_tree,
1756 );
1757}
1758
1759fn codegen_offload<'ll, 'tcx>(
1764 bx: &mut Builder<'_, 'll, 'tcx>,
1765 tcx: TyCtxt<'tcx>,
1766 instance: ty::Instance<'tcx>,
1767 args: &[OperandRef<'tcx, &'ll Value>],
1768) {
1769 let cx = bx.cx;
1770 let fn_args = instance.args;
1771
1772 let (target_id, target_args) = match fn_args.into_type_list(tcx)[0].kind() {
1773 ty::FnDef(def_id, params) => (def_id, params),
1774 _ => ::rustc_middle::util::bug::bug_fmt(format_args!("invalid offload intrinsic arg"))bug!("invalid offload intrinsic arg"),
1775 };
1776
1777 let fn_target = match Instance::try_resolve(tcx, cx.typing_env(), *target_id, target_args) {
1778 Ok(Some(instance)) => instance,
1779 Ok(None) => ::rustc_middle::util::bug::bug_fmt(format_args!("could not resolve ({0:?}, {1:?}) to a specific offload instance",
target_id, target_args))bug!(
1780 "could not resolve ({:?}, {:?}) to a specific offload instance",
1781 target_id,
1782 target_args
1783 ),
1784 Err(_) => {
1785 return;
1787 }
1788 };
1789
1790 let offload_dims = OffloadKernelDims::from_operands(bx, &args[1], &args[2]);
1791 let args = get_args_from_tuple(bx, args[3], fn_target);
1792 let target_symbol = symbol_name_for_instance_in_crate(tcx, fn_target, LOCAL_CRATE);
1793
1794 let sig = tcx.fn_sig(fn_target.def_id()).skip_binder();
1795 let sig = tcx.instantiate_bound_regions_with_erased(sig);
1796 let inputs = sig.inputs();
1797
1798 let metadata = inputs.iter().map(|ty| OffloadMetadata::from_ty(tcx, *ty)).collect::<Vec<_>>();
1799
1800 let types = inputs.iter().map(|ty| cx.layout_of(*ty).llvm_type(cx)).collect::<Vec<_>>();
1801
1802 let offload_globals_ref = cx.offload_globals.borrow();
1803 let offload_globals = match offload_globals_ref.as_ref() {
1804 Some(globals) => globals,
1805 None => {
1806 return;
1808 }
1809 };
1810 register_offload(cx);
1811 let offload_data = gen_define_handling(&cx, &metadata, target_symbol, offload_globals);
1812 gen_call_handling(bx, &offload_data, &args, &types, &metadata, offload_globals, &offload_dims);
1813}
1814
1815fn get_args_from_tuple<'ll, 'tcx>(
1816 bx: &mut Builder<'_, 'll, 'tcx>,
1817 tuple_op: OperandRef<'tcx, &'ll Value>,
1818 fn_instance: Instance<'tcx>,
1819) -> Vec<&'ll Value> {
1820 let cx = bx.cx;
1821 let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty());
1822
1823 match tuple_op.val {
1824 OperandValue::Immediate(val) => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[val]))vec![val],
1825 OperandValue::Pair(v1, v2) => ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[v1, v2]))vec![v1, v2],
1826 OperandValue::Ref(ptr) => {
1827 let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout };
1828
1829 let mut result = Vec::with_capacity(fn_abi.args.len());
1830 let mut tuple_index = 0;
1831
1832 for arg in &fn_abi.args {
1833 match arg.mode {
1834 PassMode::Ignore => {}
1835 PassMode::Direct(_) | PassMode::Cast { .. } => {
1836 let field = tuple_place.project_field(bx, tuple_index);
1837 let llvm_ty = field.layout.llvm_type(bx.cx);
1838 let val = bx.load(llvm_ty, field.val.llval, field.val.align);
1839 result.push(val);
1840 tuple_index += 1;
1841 }
1842 PassMode::Pair(_, _) => {
1843 let field = tuple_place.project_field(bx, tuple_index);
1844 let llvm_ty = field.layout.llvm_type(bx.cx);
1845 let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align);
1846 result.push(bx.extract_value(pair_val, 0));
1847 result.push(bx.extract_value(pair_val, 1));
1848 tuple_index += 1;
1849 }
1850 PassMode::Indirect { .. } => {
1851 let field = tuple_place.project_field(bx, tuple_index);
1852 result.push(field.val.llval);
1853 tuple_index += 1;
1854 }
1855 }
1856 }
1857
1858 result
1859 }
1860
1861 OperandValue::ZeroSized => ::alloc::vec::Vec::new()vec![],
1862 }
1863}
1864
1865fn generic_simd_intrinsic<'ll, 'tcx>(
1866 bx: &mut Builder<'_, 'll, 'tcx>,
1867 name: Symbol,
1868 fn_args: GenericArgsRef<'tcx>,
1869 args: &[OperandRef<'tcx, &'ll Value>],
1870 ret_ty: Ty<'tcx>,
1871 llret_ty: &'ll Type,
1872 span: Span,
1873) -> Result<&'ll Value, ()> {
1874 macro_rules! return_error {
1875 ($diag: expr) => {{
1876 bx.sess().dcx().emit_err($diag);
1877 return Err(());
1878 }};
1879 }
1880
1881 macro_rules! require {
1882 ($cond: expr, $diag: expr) => {
1883 if !$cond {
1884 return_error!($diag);
1885 }
1886 };
1887 }
1888
1889 macro_rules! require_simd {
1890 ($ty: expr, $variant:ident) => {{
1891 require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
1892 $ty.simd_size_and_type(bx.tcx())
1893 }};
1894 }
1895
1896 macro_rules! require_simd_or_scalable {
1897 ($ty: expr, $variant:ident) => {{
1898 require!(
1899 $ty.is_simd() || $ty.is_scalable_vector(),
1900 InvalidMonomorphization::$variant { span, name, ty: $ty }
1901 );
1902 if $ty.is_simd() {
1903 let (len, ty) = $ty.simd_size_and_type(bx.tcx());
1904 (len, ty, None)
1905 } else {
1906 let (count, ty, num_vecs) =
1907 $ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
1908 (count as u64, ty, Some(num_vecs))
1909 }
1910 }};
1911 }
1912
1913 macro_rules! require_int_or_uint_ty {
1915 ($ty: expr, $diag: expr) => {
1916 match $ty {
1917 ty::Int(i) => {
1918 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1919 }
1920 ty::Uint(i) => {
1921 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
1922 }
1923 _ => {
1924 return_error!($diag);
1925 }
1926 }
1927 };
1928 }
1929
1930 let llvm_version = crate::llvm_util::get_version();
1931
1932 fn vector_mask_to_bitmask<'a, 'll, 'tcx>(
1946 bx: &mut Builder<'a, 'll, 'tcx>,
1947 i_xn: &'ll Value,
1948 in_elem_bitwidth: u64,
1949 in_len: u64,
1950 ) -> &'ll Value {
1951 let shift_idx = bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
1953 let shift_indices = ::alloc::vec::from_elem(shift_idx, in_len as _)vec![shift_idx; in_len as _];
1954 let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
1955 bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
1957 }
1958
1959 if truecfg!(debug_assertions) {
1961 for arg in args {
1962 if arg.layout.ty.is_simd() {
1963 match arg.val {
OperandValue::Immediate(_) => {}
ref left_val => {
::core::panicking::assert_matches_failed(left_val,
"OperandValue::Immediate(_)", ::core::option::Option::None);
}
};assert_matches!(arg.val, OperandValue::Immediate(_));
1964 }
1965 }
1966 }
1967
1968 if name == sym::simd_select_bitmask {
1969 let (len, _) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdArgument {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdArgument);
1970
1971 let expected_int_bits = len.max(8).next_power_of_two();
1972 let expected_bytes = len.div_ceil(8);
1973
1974 let mask_ty = args[0].layout.ty;
1975 let mask = match mask_ty.kind() {
1976 ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1977 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
1978 ty::Array(elem, len)
1979 if #[allow(non_exhaustive_omitted_patterns)] match elem.kind() {
ty::Uint(ty::UintTy::U8) => true,
_ => false,
}matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
1980 && len
1981 .try_to_target_usize(bx.tcx)
1982 .expect("expected monomorphic const in codegen")
1983 == expected_bytes =>
1984 {
1985 let place = PlaceRef::alloca(bx, args[0].layout);
1986 args[0].val.store(bx, place);
1987 let int_ty = bx.type_ix(expected_bytes * 8);
1988 bx.load(int_ty, place.val.llval, Align::ONE)
1989 }
1990 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::InvalidBitmask {
span,
name,
mask_ty,
expected_int_bits,
expected_bytes,
});
return Err(());
}return_error!(InvalidMonomorphization::InvalidBitmask {
1991 span,
1992 name,
1993 mask_ty,
1994 expected_int_bits,
1995 expected_bytes
1996 }),
1997 };
1998
1999 let i1 = bx.type_i1();
2000 let im = bx.type_ix(len);
2001 let i1xn = bx.type_vector(i1, len);
2002 let m_im = bx.trunc(mask, im);
2003 let m_i1s = bx.bitcast(m_im, i1xn);
2004 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
2005 }
2006
2007 if name == sym::simd_splat {
2008 let (_out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2009
2010 if !(args[0].layout.ty == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: out_ty,
vector_type: ret_ty,
});
return Err(());
};
};require!(
2011 args[0].layout.ty == out_ty,
2012 InvalidMonomorphization::ExpectedVectorElementType {
2013 span,
2014 name,
2015 expected_element: out_ty,
2016 vector_type: ret_ty,
2017 }
2018 );
2019
2020 let poison_vec = bx.const_poison(llret_ty);
2022 let idx0 = bx.const_i32(0);
2023 let v0 = bx.insert_element(poison_vec, args[0].immediate(), idx0);
2024
2025 let splat = bx.shuffle_vector(v0, poison_vec, bx.const_null(llret_ty));
2028
2029 return Ok(splat);
2030 }
2031
2032 let supports_scalable = match name {
2033 sym::simd_cast | sym::simd_select => true,
2034 _ => false,
2035 };
2036
2037 if !supports_scalable {
2042 let _ = {
if !args[0].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdInput {
span,
name,
ty: args[0].layout.ty,
});
return Err(());
};
};
args[0].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[0].layout.ty, SimdInput);
2043 }
2044 let (in_len, in_elem, in_num_vecs) = {
if !(args[0].layout.ty.is_simd() ||
args[0].layout.ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdInput {
span,
name,
ty: args[0].layout.ty,
});
return Err(());
};
};
if args[0].layout.ty.is_simd() {
let (len, ty) = args[0].layout.ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
args[0].layout.ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(args[0].layout.ty, SimdInput);
2045 let in_ty = args[0].layout.ty;
2046
2047 let comparison = match name {
2048 sym::simd_eq => Some(BinOp::Eq),
2049 sym::simd_ne => Some(BinOp::Ne),
2050 sym::simd_lt => Some(BinOp::Lt),
2051 sym::simd_le => Some(BinOp::Le),
2052 sym::simd_gt => Some(BinOp::Gt),
2053 sym::simd_ge => Some(BinOp::Ge),
2054 _ => None,
2055 };
2056
2057 if let Some(cmp_op) = comparison {
2058 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2059
2060 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2061 in_len == out_len,
2062 InvalidMonomorphization::ReturnLengthInputType {
2063 span,
2064 name,
2065 in_len,
2066 in_ty,
2067 ret_ty,
2068 out_len
2069 }
2070 );
2071 if !(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnIntegerType {
span,
name,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2072 bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
2073 InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
2074 );
2075
2076 return Ok(compare_simd_types(
2077 bx,
2078 args[0].immediate(),
2079 args[1].immediate(),
2080 in_elem,
2081 llret_ty,
2082 cmp_op,
2083 ));
2084 }
2085
2086 if name == sym::simd_shuffle_const_generic {
2087 let idx = fn_args[2].expect_const().to_branch();
2088 let n = idx.len() as u64;
2089
2090 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2091 if !(out_len == n) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLength {
span,
name,
in_len: n,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2092 out_len == n,
2093 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
2094 );
2095 if !(in_elem == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnElement {
span,
name,
in_elem,
in_ty,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2096 in_elem == out_ty,
2097 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
2098 );
2099
2100 let total_len = in_len * 2;
2101
2102 let indices: Option<Vec<_>> = idx
2103 .iter()
2104 .enumerate()
2105 .map(|(arg_idx, val)| {
2106 let idx = val.to_leaf().to_i32();
2107 if idx >= i32::try_from(total_len).unwrap() {
2108 bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
2109 span,
2110 name,
2111 arg_idx: arg_idx as u64,
2112 total_len: total_len.into(),
2113 });
2114 None
2115 } else {
2116 Some(bx.const_i32(idx))
2117 }
2118 })
2119 .collect();
2120 let Some(indices) = indices else {
2121 return Ok(bx.const_null(llret_ty));
2122 };
2123
2124 return Ok(bx.shuffle_vector(
2125 args[0].immediate(),
2126 args[1].immediate(),
2127 bx.const_vector(&indices),
2128 ));
2129 }
2130
2131 if name == sym::simd_shuffle {
2132 let idx_ty = args[2].layout.ty;
2134 let n: u64 = if idx_ty.is_simd()
2135 && #[allow(non_exhaustive_omitted_patterns)] match idx_ty.simd_size_and_type(bx.cx.tcx).1.kind()
{
ty::Uint(ty::UintTy::U32) => true,
_ => false,
}matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32))
2136 {
2137 idx_ty.simd_size_and_type(bx.cx.tcx).0
2138 } else {
2139 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdShuffle {
span,
name,
ty: idx_ty,
});
return Err(());
}return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty })
2140 };
2141
2142 let (out_len, out_ty) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2143 if !(out_len == n) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLength {
span,
name,
in_len: n,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2144 out_len == n,
2145 InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
2146 );
2147 if !(in_elem == out_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnElement {
span,
name,
in_elem,
in_ty,
ret_ty,
out_ty,
});
return Err(());
};
};require!(
2148 in_elem == out_ty,
2149 InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
2150 );
2151
2152 let total_len = u128::from(in_len) * 2;
2153
2154 let indices = args[2].immediate();
2156 for i in 0..n {
2157 let val = bx.const_get_elt(indices, i as u64);
2158 let idx = bx
2159 .const_to_opt_u128(val, true)
2160 .unwrap_or_else(|| ::rustc_middle::util::bug::bug_fmt(format_args!("typeck should have already ensured that these are const"))bug!("typeck should have already ensured that these are const"));
2161 if idx >= total_len {
2162 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: i,
total_len,
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2163 span,
2164 name,
2165 arg_idx: i,
2166 total_len,
2167 });
2168 }
2169 }
2170
2171 return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices));
2172 }
2173
2174 if name == sym::simd_insert || name == sym::simd_insert_dyn {
2175 if !(in_elem == args[2].layout.ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::InsertedType {
span,
name,
in_elem,
in_ty,
out_ty: args[2].layout.ty,
});
return Err(());
};
};require!(
2176 in_elem == args[2].layout.ty,
2177 InvalidMonomorphization::InsertedType {
2178 span,
2179 name,
2180 in_elem,
2181 in_ty,
2182 out_ty: args[2].layout.ty
2183 }
2184 );
2185
2186 let index_imm = if name == sym::simd_insert {
2187 let idx = bx
2188 .const_to_opt_u128(args[1].immediate(), false)
2189 .expect("typeck should have ensure that this is a const");
2190 if idx >= in_len.into() {
2191 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: 1,
total_len: in_len.into(),
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2192 span,
2193 name,
2194 arg_idx: 1,
2195 total_len: in_len.into(),
2196 });
2197 }
2198 bx.const_i32(idx as i32)
2199 } else {
2200 args[1].immediate()
2201 };
2202
2203 return Ok(bx.insert_element(args[0].immediate(), args[2].immediate(), index_imm));
2204 }
2205 if name == sym::simd_extract || name == sym::simd_extract_dyn {
2206 if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};require!(
2207 ret_ty == in_elem,
2208 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2209 );
2210 let index_imm = if name == sym::simd_extract {
2211 let idx = bx
2212 .const_to_opt_u128(args[1].immediate(), false)
2213 .expect("typeck should have ensure that this is a const");
2214 if idx >= in_len.into() {
2215 {
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
span,
name,
arg_idx: 1,
total_len: in_len.into(),
});
return Err(());
};return_error!(InvalidMonomorphization::SimdIndexOutOfBounds {
2216 span,
2217 name,
2218 arg_idx: 1,
2219 total_len: in_len.into(),
2220 });
2221 }
2222 bx.const_i32(idx as i32)
2223 } else {
2224 args[1].immediate()
2225 };
2226
2227 return Ok(bx.extract_element(args[0].immediate(), index_imm));
2228 }
2229
2230 if name == sym::simd_select {
2231 let m_elem_ty = in_elem;
2232 let m_len = in_len;
2233 let (v_len, _, _) = {
if !(args[1].layout.ty.is_simd() ||
args[1].layout.ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdArgument {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
if args[1].layout.ty.is_simd() {
let (len, ty) = args[1].layout.ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
args[1].layout.ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(args[1].layout.ty, SimdArgument);
2234 if !(m_len == v_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MismatchedLengths {
span,
name,
m_len,
v_len,
});
return Err(());
};
};require!(
2235 m_len == v_len,
2236 InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
2237 );
2238
2239 let m_i1s = if args[1].layout.ty.is_scalable_vector() {
2240 match m_elem_ty.kind() {
2241 ty::Bool => {}
2242 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty,
});
return Err(());
}return_error!(InvalidMonomorphization::MaskWrongElementType {
2243 span,
2244 name,
2245 ty: m_elem_ty
2246 }),
2247 };
2248 let i1 = bx.type_i1();
2249 let i1xn = bx.type_scalable_vector(i1, m_len as u64);
2250 bx.trunc(args[0].immediate(), i1xn)
2251 } else {
2252 let in_elem_bitwidth = match m_elem_ty.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: m_elem_ty,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2253 m_elem_ty.kind(),
2254 InvalidMonomorphization::MaskWrongElementType { span, name, ty: m_elem_ty }
2255 );
2256 vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, m_len)
2257 };
2258
2259 return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
2260 }
2261
2262 if name == sym::simd_bitmask {
2263 let expected_int_bits = in_len.max(8).next_power_of_two();
2272 let expected_bytes = in_len.div_ceil(8);
2273
2274 let in_elem_bitwidth = match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: in_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2276 in_elem.kind(),
2277 InvalidMonomorphization::MaskWrongElementType { span, name, ty: in_elem }
2278 );
2279
2280 let i1xn = vector_mask_to_bitmask(bx, args[0].immediate(), in_elem_bitwidth, in_len);
2281 let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
2283
2284 match ret_ty.kind() {
2285 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
2286 return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
2288 }
2289 ty::Array(elem, len)
2290 if #[allow(non_exhaustive_omitted_patterns)] match elem.kind() {
ty::Uint(ty::UintTy::U8) => true,
_ => false,
}matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
2291 && len
2292 .try_to_target_usize(bx.tcx)
2293 .expect("expected monomorphic const in codegen")
2294 == expected_bytes =>
2295 {
2296 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
2298
2299 let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
2301 bx.store(ze, ptr, Align::ONE);
2302 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
2303 return Ok(bx.load(array_ty, ptr, Align::ONE));
2304 }
2305 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::CannotReturn {
span,
name,
ret_ty,
expected_int_bits,
expected_bytes,
});
return Err(());
}return_error!(InvalidMonomorphization::CannotReturn {
2306 span,
2307 name,
2308 ret_ty,
2309 expected_int_bits,
2310 expected_bytes
2311 }),
2312 }
2313 }
2314
2315 fn simd_simple_float_intrinsic<'ll, 'tcx>(
2316 name: Symbol,
2317 in_elem: Ty<'_>,
2318 in_ty: Ty<'_>,
2319 in_len: u64,
2320 bx: &mut Builder<'_, 'll, 'tcx>,
2321 span: Span,
2322 args: &[OperandRef<'tcx, &'ll Value>],
2323 ) -> Result<&'ll Value, ()> {
2324 macro_rules! return_error {
2325 ($diag: expr) => {{
2326 bx.sess().dcx().emit_err($diag);
2327 return Err(());
2328 }};
2329 }
2330
2331 let ty::Float(f) = in_elem.kind() else {
2332 {
bx.sess().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
span,
name,
ty: in_ty,
});
return Err(());
};return_error!(InvalidMonomorphization::BasicFloatType { span, name, ty: in_ty });
2333 };
2334 let elem_ty = bx.cx.type_float_from_ty(*f);
2335
2336 let vec_ty = bx.type_vector(elem_ty, in_len);
2337
2338 let intr_name = match name {
2339 sym::simd_ceil => "llvm.ceil",
2340 sym::simd_fabs => "llvm.fabs",
2341 sym::simd_fcos => "llvm.cos",
2342 sym::simd_fexp2 => "llvm.exp2",
2343 sym::simd_fexp => "llvm.exp",
2344 sym::simd_flog10 => "llvm.log10",
2345 sym::simd_flog2 => "llvm.log2",
2346 sym::simd_flog => "llvm.log",
2347 sym::simd_floor => "llvm.floor",
2348 sym::simd_fma => "llvm.fma",
2349 sym::simd_relaxed_fma => "llvm.fmuladd",
2350 sym::simd_fsin => "llvm.sin",
2351 sym::simd_fsqrt => "llvm.sqrt",
2352 sym::simd_round => "llvm.round",
2353 sym::simd_round_ties_even => "llvm.rint",
2354 sym::simd_trunc => "llvm.trunc",
2355 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnrecognizedIntrinsic {
span,
name,
});
return Err(());
}return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
2356 };
2357 Ok(bx.call_intrinsic(
2358 intr_name,
2359 &[vec_ty],
2360 &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
2361 ))
2362 }
2363
2364 if #[allow(non_exhaustive_omitted_patterns)] match name {
sym::simd_ceil | sym::simd_fabs | sym::simd_fcos | sym::simd_fexp2 |
sym::simd_fexp | sym::simd_flog10 | sym::simd_flog2 | sym::simd_flog |
sym::simd_floor | sym::simd_fma | sym::simd_fsin | sym::simd_fsqrt |
sym::simd_relaxed_fma | sym::simd_round | sym::simd_round_ties_even |
sym::simd_trunc => true,
_ => false,
}std::matches!(
2365 name,
2366 sym::simd_ceil
2367 | sym::simd_fabs
2368 | sym::simd_fcos
2369 | sym::simd_fexp2
2370 | sym::simd_fexp
2371 | sym::simd_flog10
2372 | sym::simd_flog2
2373 | sym::simd_flog
2374 | sym::simd_floor
2375 | sym::simd_fma
2376 | sym::simd_fsin
2377 | sym::simd_fsqrt
2378 | sym::simd_relaxed_fma
2379 | sym::simd_round
2380 | sym::simd_round_ties_even
2381 | sym::simd_trunc
2382 ) {
2383 return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
2384 }
2385
2386 fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
2387 let elem_ty = match *elem_ty.kind() {
2388 ty::Int(v) => cx.type_int_from_ty(v),
2389 ty::Uint(v) => cx.type_uint_from_ty(v),
2390 ty::Float(v) => cx.type_float_from_ty(v),
2391 ty::RawPtr(_, _) => cx.type_ptr(),
2392 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
2393 };
2394 cx.type_vector(elem_ty, vec_len)
2395 }
2396
2397 if name == sym::simd_gather {
2398 let (_, element_ty0) = {
if !in_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdFirst {
span,
name,
ty: in_ty,
});
return Err(());
};
};
in_ty.simd_size_and_type(bx.tcx())
}require_simd!(in_ty, SimdFirst);
2409 let (out_len, element_ty1) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdSecond {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdSecond);
2410 let (out_len2, element_ty2) = {
if !args[2].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: args[2].layout.ty,
});
return Err(());
};
};
args[2].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[2].layout.ty, SimdThird);
2412 {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
};require_simd!(ret_ty, SimdReturn);
2413
2414 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len,
});
return Err(());
};
};require!(
2416 in_len == out_len,
2417 InvalidMonomorphization::SecondArgumentLength {
2418 span,
2419 name,
2420 in_len,
2421 in_ty,
2422 arg_ty: args[1].layout.ty,
2423 out_len
2424 }
2425 );
2426 if !(in_len == out_len2) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: out_len2,
});
return Err(());
};
};require!(
2427 in_len == out_len2,
2428 InvalidMonomorphization::ThirdArgumentLength {
2429 span,
2430 name,
2431 in_len,
2432 in_ty,
2433 arg_ty: args[2].layout.ty,
2434 out_len: out_len2
2435 }
2436 );
2437
2438 if !(ret_ty == in_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedReturnType {
span,
name,
in_ty,
ret_ty,
});
return Err(());
};
};require!(
2440 ret_ty == in_ty,
2441 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
2442 );
2443
2444 if !#[allow(non_exhaustive_omitted_patterns)] match *element_ty1.kind() {
ty::RawPtr(p_ty, _) if
p_ty == in_elem && p_ty.kind() == element_ty0.kind() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Not,
});
return Err(());
};
};require!(
2445 matches!(
2446 *element_ty1.kind(),
2447 ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
2448 ),
2449 InvalidMonomorphization::ExpectedElementType {
2450 span,
2451 name,
2452 expected_element: element_ty1,
2453 second_arg: args[1].layout.ty,
2454 in_elem,
2455 in_ty,
2456 mutability: ExpectedPointerMutability::Not,
2457 }
2458 );
2459
2460 let mask_elem_bitwidth = match element_ty2.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: element_ty2,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2461 element_ty2.kind(),
2462 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2463 );
2464
2465 let alignment = bx.align_of(in_elem).bytes();
2467
2468 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2470
2471 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2473
2474 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2476
2477 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2478 let alignment = bx.const_i32(alignment as i32);
2479 &[args[1].immediate(), alignment, mask, args[0].immediate()]
2480 } else {
2481 &[args[1].immediate(), mask, args[0].immediate()]
2482 };
2483
2484 let call =
2485 bx.call_intrinsic("llvm.masked.gather", &[llvm_elem_vec_ty, llvm_pointer_vec_ty], args);
2486 if llvm_version >= (22, 0, 0) {
2487 crate::attributes::apply_to_callsite(
2488 call,
2489 crate::llvm::AttributePlace::Argument(0),
2490 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2491 )
2492 }
2493 return Ok(call);
2494 }
2495
2496 fn llvm_alignment<'ll, 'tcx>(
2497 bx: &mut Builder<'_, 'll, 'tcx>,
2498 alignment: SimdAlign,
2499 vector_ty: Ty<'tcx>,
2500 element_ty: Ty<'tcx>,
2501 ) -> u64 {
2502 match alignment {
2503 SimdAlign::Unaligned => 1,
2504 SimdAlign::Element => bx.align_of(element_ty).bytes(),
2505 SimdAlign::Vector => bx.align_of(vector_ty).bytes(),
2506 }
2507 }
2508
2509 if name == sym::simd_masked_load {
2510 let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
2519
2520 let mask_ty = in_ty;
2522 let (mask_len, mask_elem) = (in_len, in_elem);
2523
2524 let pointer_ty = args[1].layout.ty;
2526
2527 let values_ty = args[2].layout.ty;
2529 let (values_len, values_elem) = {
if !values_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: values_ty,
});
return Err(());
};
};
values_ty.simd_size_and_type(bx.tcx())
}require_simd!(values_ty, SimdThird);
2530
2531 {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
};require_simd!(ret_ty, SimdReturn);
2532
2533 if !(values_len == mask_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len,
});
return Err(());
};
};require!(
2535 values_len == mask_len,
2536 InvalidMonomorphization::ThirdArgumentLength {
2537 span,
2538 name,
2539 in_len: mask_len,
2540 in_ty: mask_ty,
2541 arg_ty: values_ty,
2542 out_len: values_len
2543 }
2544 );
2545
2546 if !(ret_ty == values_ty) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedReturnType {
span,
name,
in_ty: values_ty,
ret_ty,
});
return Err(());
};
};require!(
2548 ret_ty == values_ty,
2549 InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
2550 );
2551
2552 if !#[allow(non_exhaustive_omitted_patterns)] match *pointer_ty.kind() {
ty::RawPtr(p_ty, _) if
p_ty == values_elem && p_ty.kind() == values_elem.kind() =>
true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Not,
});
return Err(());
};
};require!(
2553 matches!(
2554 *pointer_ty.kind(),
2555 ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
2556 ),
2557 InvalidMonomorphization::ExpectedElementType {
2558 span,
2559 name,
2560 expected_element: values_elem,
2561 second_arg: pointer_ty,
2562 in_elem: values_elem,
2563 in_ty: values_ty,
2564 mutability: ExpectedPointerMutability::Not,
2565 }
2566 );
2567
2568 let m_elem_bitwidth = match mask_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: mask_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2569 mask_elem.kind(),
2570 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2571 );
2572
2573 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2574
2575 let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2577
2578 let llvm_pointer = bx.type_ptr();
2579
2580 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2582
2583 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2584 let alignment = bx.const_i32(alignment as i32);
2585
2586 &[args[1].immediate(), alignment, mask, args[2].immediate()]
2587 } else {
2588 &[args[1].immediate(), mask, args[2].immediate()]
2589 };
2590
2591 let call = bx.call_intrinsic("llvm.masked.load", &[llvm_elem_vec_ty, llvm_pointer], args);
2592 if llvm_version >= (22, 0, 0) {
2593 crate::attributes::apply_to_callsite(
2594 call,
2595 crate::llvm::AttributePlace::Argument(0),
2596 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2597 )
2598 }
2599 return Ok(call);
2600 }
2601
2602 if name == sym::simd_masked_store {
2603 let alignment = fn_args[3].expect_const().to_branch()[0].to_leaf().to_simd_alignment();
2612
2613 let mask_ty = in_ty;
2615 let (mask_len, mask_elem) = (in_len, in_elem);
2616
2617 let pointer_ty = args[1].layout.ty;
2619
2620 let values_ty = args[2].layout.ty;
2622 let (values_len, values_elem) = {
if !values_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: values_ty,
});
return Err(());
};
};
values_ty.simd_size_and_type(bx.tcx())
}require_simd!(values_ty, SimdThird);
2623
2624 if !(values_len == mask_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len: mask_len,
in_ty: mask_ty,
arg_ty: values_ty,
out_len: values_len,
});
return Err(());
};
};require!(
2626 values_len == mask_len,
2627 InvalidMonomorphization::ThirdArgumentLength {
2628 span,
2629 name,
2630 in_len: mask_len,
2631 in_ty: mask_ty,
2632 arg_ty: values_ty,
2633 out_len: values_len
2634 }
2635 );
2636
2637 if !#[allow(non_exhaustive_omitted_patterns)] match *pointer_ty.kind() {
ty::RawPtr(p_ty, p_mutbl) if
p_ty == values_elem && p_ty.kind() == values_elem.kind() &&
p_mutbl.is_mut() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: values_elem,
second_arg: pointer_ty,
in_elem: values_elem,
in_ty: values_ty,
mutability: ExpectedPointerMutability::Mut,
});
return Err(());
};
};require!(
2639 matches!(
2640 *pointer_ty.kind(),
2641 ty::RawPtr(p_ty, p_mutbl)
2642 if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
2643 ),
2644 InvalidMonomorphization::ExpectedElementType {
2645 span,
2646 name,
2647 expected_element: values_elem,
2648 second_arg: pointer_ty,
2649 in_elem: values_elem,
2650 in_ty: values_ty,
2651 mutability: ExpectedPointerMutability::Mut,
2652 }
2653 );
2654
2655 let m_elem_bitwidth = match mask_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: mask_elem,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2656 mask_elem.kind(),
2657 InvalidMonomorphization::MaskWrongElementType { span, name, ty: mask_elem }
2658 );
2659
2660 let mask = vector_mask_to_bitmask(bx, args[0].immediate(), m_elem_bitwidth, mask_len);
2661
2662 let alignment = llvm_alignment(bx, alignment, values_ty, values_elem);
2664
2665 let llvm_pointer = bx.type_ptr();
2666
2667 let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
2669
2670 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2671 let alignment = bx.const_i32(alignment as i32);
2672 &[args[2].immediate(), args[1].immediate(), alignment, mask]
2673 } else {
2674 &[args[2].immediate(), args[1].immediate(), mask]
2675 };
2676
2677 let call = bx.call_intrinsic("llvm.masked.store", &[llvm_elem_vec_ty, llvm_pointer], args);
2678 if llvm_version >= (22, 0, 0) {
2679 crate::attributes::apply_to_callsite(
2680 call,
2681 crate::llvm::AttributePlace::Argument(1),
2682 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2683 )
2684 }
2685 return Ok(call);
2686 }
2687
2688 if name == sym::simd_scatter {
2689 let (_, element_ty0) = {
if !in_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdFirst {
span,
name,
ty: in_ty,
});
return Err(());
};
};
in_ty.simd_size_and_type(bx.tcx())
}require_simd!(in_ty, SimdFirst);
2699 let (element_len1, element_ty1) = {
if !args[1].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdSecond {
span,
name,
ty: args[1].layout.ty,
});
return Err(());
};
};
args[1].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[1].layout.ty, SimdSecond);
2700 let (element_len2, element_ty2) = {
if !args[2].layout.ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdThird {
span,
name,
ty: args[2].layout.ty,
});
return Err(());
};
};
args[2].layout.ty.simd_size_and_type(bx.tcx())
}require_simd!(args[2].layout.ty, SimdThird);
2701
2702 if !(in_len == element_len1) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SecondArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[1].layout.ty,
out_len: element_len1,
});
return Err(());
};
};require!(
2704 in_len == element_len1,
2705 InvalidMonomorphization::SecondArgumentLength {
2706 span,
2707 name,
2708 in_len,
2709 in_ty,
2710 arg_ty: args[1].layout.ty,
2711 out_len: element_len1
2712 }
2713 );
2714 if !(in_len == element_len2) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ThirdArgumentLength {
span,
name,
in_len,
in_ty,
arg_ty: args[2].layout.ty,
out_len: element_len2,
});
return Err(());
};
};require!(
2715 in_len == element_len2,
2716 InvalidMonomorphization::ThirdArgumentLength {
2717 span,
2718 name,
2719 in_len,
2720 in_ty,
2721 arg_ty: args[2].layout.ty,
2722 out_len: element_len2
2723 }
2724 );
2725
2726 if !#[allow(non_exhaustive_omitted_patterns)] match *element_ty1.kind() {
ty::RawPtr(p_ty, p_mutbl) if
p_ty == in_elem && p_mutbl.is_mut() &&
p_ty.kind() == element_ty0.kind() => true,
_ => false,
} {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedElementType {
span,
name,
expected_element: element_ty1,
second_arg: args[1].layout.ty,
in_elem,
in_ty,
mutability: ExpectedPointerMutability::Mut,
});
return Err(());
};
};require!(
2727 matches!(
2728 *element_ty1.kind(),
2729 ty::RawPtr(p_ty, p_mutbl)
2730 if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
2731 ),
2732 InvalidMonomorphization::ExpectedElementType {
2733 span,
2734 name,
2735 expected_element: element_ty1,
2736 second_arg: args[1].layout.ty,
2737 in_elem,
2738 in_ty,
2739 mutability: ExpectedPointerMutability::Mut,
2740 }
2741 );
2742
2743 let mask_elem_bitwidth = match element_ty2.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::MaskWrongElementType {
span,
name,
ty: element_ty2,
});
return Err(());
};
}
}require_int_or_uint_ty!(
2745 element_ty2.kind(),
2746 InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
2747 );
2748
2749 let alignment = bx.align_of(in_elem).bytes();
2751
2752 let mask = vector_mask_to_bitmask(bx, args[2].immediate(), mask_elem_bitwidth, in_len);
2754
2755 let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
2757
2758 let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
2760 let args: &[&'ll Value] = if llvm_version < (22, 0, 0) {
2761 let alignment = bx.const_i32(alignment as i32);
2762 &[args[0].immediate(), args[1].immediate(), alignment, mask]
2763 } else {
2764 &[args[0].immediate(), args[1].immediate(), mask]
2765 };
2766 let call = bx.call_intrinsic(
2767 "llvm.masked.scatter",
2768 &[llvm_elem_vec_ty, llvm_pointer_vec_ty],
2769 args,
2770 );
2771 if llvm_version >= (22, 0, 0) {
2772 crate::attributes::apply_to_callsite(
2773 call,
2774 crate::llvm::AttributePlace::Argument(1),
2775 &[crate::llvm::CreateAlignmentAttr(bx.llcx, alignment)],
2776 )
2777 }
2778 return Ok(call);
2779 }
2780
2781 macro_rules! arith_red {
2782 ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
2783 $identity:expr) => {
2784 if name == sym::$name {
2785 require!(
2786 ret_ty == in_elem,
2787 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2788 );
2789 return match in_elem.kind() {
2790 ty::Int(_) | ty::Uint(_) => {
2791 let r = bx.$integer_reduce(args[0].immediate());
2792 if $ordered {
2793 Ok(bx.$op(args[1].immediate(), r))
2796 } else {
2797 Ok(bx.$integer_reduce(args[0].immediate()))
2798 }
2799 }
2800 ty::Float(f) => {
2801 let acc = if $ordered {
2802 args[1].immediate()
2804 } else {
2805 match f.bit_width() {
2807 32 => bx.const_real(bx.type_f32(), $identity),
2808 64 => bx.const_real(bx.type_f64(), $identity),
2809 v => return_error!(
2810 InvalidMonomorphization::UnsupportedSymbolOfSize {
2811 span,
2812 name,
2813 symbol: sym::$name,
2814 in_ty,
2815 in_elem,
2816 size: v,
2817 ret_ty
2818 }
2819 ),
2820 }
2821 };
2822 Ok(bx.$float_reduce(acc, args[0].immediate()))
2823 }
2824 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2825 span,
2826 name,
2827 symbol: sym::$name,
2828 in_ty,
2829 in_elem,
2830 ret_ty
2831 }),
2832 };
2833 }
2834 };
2835 }
2836
2837 if name == sym::simd_reduce_add_ordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_add(args[0].immediate());
if true {
Ok(bx.add(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_add(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if true {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), -0.0),
64 => bx.const_real(bx.type_f64(), -0.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_add_ordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fadd(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_add_ordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0);
2838 if name == sym::simd_reduce_mul_ordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_mul(args[0].immediate());
if true {
Ok(bx.mul(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_mul(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if true {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), 1.0),
64 => bx.const_real(bx.type_f64(), 1.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_mul_ordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fmul(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_mul_ordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
2839 if name == sym::simd_reduce_add_unordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_add(args[0].immediate());
if false {
Ok(bx.add(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_add(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if false {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), -0.0),
64 => bx.const_real(bx.type_f64(), -0.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_add_unordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fadd_reassoc(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_add_unordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(
2840 simd_reduce_add_unordered: vector_reduce_add,
2841 vector_reduce_fadd_reassoc,
2842 false,
2843 add,
2844 -0.0
2845 );
2846 if name == sym::simd_reduce_mul_unordered {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_mul(args[0].immediate());
if false {
Ok(bx.mul(args[1].immediate(), r))
} else { Ok(bx.vector_reduce_mul(args[0].immediate())) }
}
ty::Float(f) => {
let acc =
if false {
args[1].immediate()
} else {
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), 1.0),
64 => bx.const_real(bx.type_f64(), 1.0),
v => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbolOfSize {
span,
name,
symbol: sym::simd_reduce_mul_unordered,
in_ty,
in_elem,
size: v,
ret_ty,
});
return Err(());
}
}
};
Ok(bx.vector_reduce_fmul_reassoc(acc, args[0].immediate()))
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_mul_unordered,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};arith_red!(
2847 simd_reduce_mul_unordered: vector_reduce_mul,
2848 vector_reduce_fmul_reassoc,
2849 false,
2850 mul,
2851 1.0
2852 );
2853
2854 macro_rules! minmax_red {
2855 ($name:ident: $int_red:ident, $float_red:ident) => {
2856 if name == sym::$name {
2857 require!(
2858 ret_ty == in_elem,
2859 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2860 );
2861 return match in_elem.kind() {
2862 ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
2863 ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
2864 ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
2865 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2866 span,
2867 name,
2868 symbol: sym::$name,
2869 in_ty,
2870 in_elem,
2871 ret_ty
2872 }),
2873 };
2874 }
2875 };
2876 }
2877
2878 if name == sym::simd_reduce_min {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_i) =>
Ok(bx.vector_reduce_min(args[0].immediate(), true)),
ty::Uint(_u) =>
Ok(bx.vector_reduce_min(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.vector_reduce_fmin(args[0].immediate())),
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_min,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
2879 if name == sym::simd_reduce_max {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
return match in_elem.kind() {
ty::Int(_i) =>
Ok(bx.vector_reduce_max(args[0].immediate(), true)),
ty::Uint(_u) =>
Ok(bx.vector_reduce_max(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.vector_reduce_fmax(args[0].immediate())),
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_max,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
2880
2881 macro_rules! bitwise_red {
2882 ($name:ident : $red:ident, $boolean:expr) => {
2883 if name == sym::$name {
2884 let input = if !$boolean {
2885 require!(
2886 ret_ty == in_elem,
2887 InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
2888 );
2889 args[0].immediate()
2890 } else {
2891 let bitwidth = match in_elem.kind() {
2892 ty::Int(i) => {
2893 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2894 }
2895 ty::Uint(i) => {
2896 i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size().bits())
2897 }
2898 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2899 span,
2900 name,
2901 symbol: sym::$name,
2902 in_ty,
2903 in_elem,
2904 ret_ty
2905 }),
2906 };
2907
2908 vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth, in_len as _)
2909 };
2910 return match in_elem.kind() {
2911 ty::Int(_) | ty::Uint(_) => {
2912 let r = bx.$red(input);
2913 Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
2914 }
2915 _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
2916 span,
2917 name,
2918 symbol: sym::$name,
2919 in_ty,
2920 in_elem,
2921 ret_ty
2922 }),
2923 };
2924 }
2925 };
2926 }
2927
2928 if name == sym::simd_reduce_and {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_and,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_and(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_and,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_and: vector_reduce_and, false);
2929 if name == sym::simd_reduce_or {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_or,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_or(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_or,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_or: vector_reduce_or, false);
2930 if name == sym::simd_reduce_xor {
let input =
if !false {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_xor,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_xor(input);
Ok(if !false { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_xor,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
2931 if name == sym::simd_reduce_all {
let input =
if !true {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_all,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_and(input);
Ok(if !true { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_all,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_all: vector_reduce_and, true);
2932 if name == sym::simd_reduce_any {
let input =
if !true {
if !(ret_ty == in_elem) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnType {
span,
name,
in_elem,
in_ty,
ret_ty,
});
return Err(());
};
};
args[0].immediate()
} else {
let bitwidth =
match in_elem.kind() {
ty::Int(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
ty::Uint(i) => {
i.bit_width().unwrap_or_else(||
bx.data_layout().pointer_size().bits())
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_any,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
vector_mask_to_bitmask(bx, args[0].immediate(), bitwidth,
in_len as _)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.vector_reduce_or(input);
Ok(if !true { r } else { bx.zext(r, bx.type_bool()) })
}
_ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedSymbol {
span,
name,
symbol: sym::simd_reduce_any,
in_ty,
in_elem,
ret_ty,
});
return Err(());
}
};
};bitwise_red!(simd_reduce_any: vector_reduce_or, true);
2933
2934 if name == sym::simd_cast_ptr {
2935 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2936 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2937 in_len == out_len,
2938 InvalidMonomorphization::ReturnLengthInputType {
2939 span,
2940 name,
2941 in_len,
2942 in_ty,
2943 ret_ty,
2944 out_len
2945 }
2946 );
2947
2948 match in_elem.kind() {
2949 ty::RawPtr(p_ty, _) => {
2950 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2951 bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2952 });
2953 if !metadata.is_unit() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::CastWidePointer {
span,
name,
ty: in_elem,
});
return Err(());
};
};require!(
2954 metadata.is_unit(),
2955 InvalidMonomorphization::CastWidePointer { span, name, ty: in_elem }
2956 );
2957 }
2958 _ => {
2959 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2960 }
2961 }
2962 match out_elem.kind() {
2963 ty::RawPtr(p_ty, _) => {
2964 let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
2965 bx.tcx.normalize_erasing_regions(bx.typing_env(), ty)
2966 });
2967 if !metadata.is_unit() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::CastWidePointer {
span,
name,
ty: out_elem,
});
return Err(());
};
};require!(
2968 metadata.is_unit(),
2969 InvalidMonomorphization::CastWidePointer { span, name, ty: out_elem }
2970 );
2971 }
2972 _ => {
2973 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
2974 }
2975 }
2976
2977 return Ok(args[0].immediate());
2978 }
2979
2980 if name == sym::simd_expose_provenance {
2981 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
2982 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
2983 in_len == out_len,
2984 InvalidMonomorphization::ReturnLengthInputType {
2985 span,
2986 name,
2987 in_len,
2988 in_ty,
2989 ret_ty,
2990 out_len
2991 }
2992 );
2993
2994 match in_elem.kind() {
2995 ty::RawPtr(_, _) => {}
2996 _ => {
2997 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
2998 }
2999 }
3000 match out_elem.kind() {
3001 ty::Uint(ty::UintTy::Usize) => {}
3002 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedUsize {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
3003 }
3004
3005 return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
3006 }
3007
3008 if name == sym::simd_with_exposed_provenance {
3009 let (out_len, out_elem) = {
if !ret_ty.is_simd() {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
ret_ty.simd_size_and_type(bx.tcx())
}require_simd!(ret_ty, SimdReturn);
3010 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3011 in_len == out_len,
3012 InvalidMonomorphization::ReturnLengthInputType {
3013 span,
3014 name,
3015 in_len,
3016 in_ty,
3017 ret_ty,
3018 out_len
3019 }
3020 );
3021
3022 match in_elem.kind() {
3023 ty::Uint(ty::UintTy::Usize) => {}
3024 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedUsize {
span,
name,
ty: in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
3025 }
3026 match out_elem.kind() {
3027 ty::RawPtr(_, _) => {}
3028 _ => {
3029 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedPointer {
span,
name,
ty: out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
3030 }
3031 }
3032
3033 return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
3034 }
3035
3036 if name == sym::simd_cast || name == sym::simd_as {
3037 let (out_len, out_elem, out_num_vecs) = {
if !(ret_ty.is_simd() || ret_ty.is_scalable_vector()) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::SimdReturn {
span,
name,
ty: ret_ty,
});
return Err(());
};
};
if ret_ty.is_simd() {
let (len, ty) = ret_ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
let (count, ty, num_vecs) =
ret_ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}require_simd_or_scalable!(ret_ty, SimdReturn);
3038 if !(in_len == out_len) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnLengthInputType {
span,
name,
in_len,
in_ty,
ret_ty,
out_len,
});
return Err(());
};
};require!(
3039 in_len == out_len,
3040 InvalidMonomorphization::ReturnLengthInputType {
3041 span,
3042 name,
3043 in_len,
3044 in_ty,
3045 ret_ty,
3046 out_len
3047 }
3048 );
3049 if !(in_num_vecs == out_num_vecs) {
{
bx.sess().dcx().emit_err(InvalidMonomorphization::ReturnNumVecsInputType {
span,
name,
in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
in_ty,
ret_ty,
out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1)),
});
return Err(());
};
};require!(
3050 in_num_vecs == out_num_vecs,
3051 InvalidMonomorphization::ReturnNumVecsInputType {
3052 span,
3053 name,
3054 in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
3055 in_ty,
3056 ret_ty,
3057 out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1))
3058 }
3059 );
3060
3061 if in_elem == out_elem {
3063 return Ok(args[0].immediate());
3064 }
3065
3066 #[derive(#[automatically_derived]
impl ::core::marker::Copy for Sign { }Copy, #[automatically_derived]
impl ::core::clone::Clone for Sign {
#[inline]
fn clone(&self) -> Sign { *self }
}Clone)]
3067 enum Sign {
3068 Unsigned,
3069 Signed,
3070 }
3071 use Sign::*;
3072
3073 enum Style {
3074 Float,
3075 Int(Sign),
3076 Unsupported,
3077 }
3078
3079 let (in_style, in_width) = match in_elem.kind() {
3080 ty::Int(i) => (
3083 Style::Int(Signed),
3084 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3085 ),
3086 ty::Uint(u) => (
3087 Style::Int(Unsigned),
3088 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3089 ),
3090 ty::Float(f) => (Style::Float, f.bit_width()),
3091 _ => (Style::Unsupported, 0),
3092 };
3093 let (out_style, out_width) = match out_elem.kind() {
3094 ty::Int(i) => (
3095 Style::Int(Signed),
3096 i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3097 ),
3098 ty::Uint(u) => (
3099 Style::Int(Unsigned),
3100 u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
3101 ),
3102 ty::Float(f) => (Style::Float, f.bit_width()),
3103 _ => (Style::Unsupported, 0),
3104 };
3105
3106 match (in_style, out_style) {
3107 (Style::Int(sign), Style::Int(_)) => {
3108 return Ok(match in_width.cmp(&out_width) {
3109 Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
3110 Ordering::Equal => args[0].immediate(),
3111 Ordering::Less => match sign {
3112 Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
3113 Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
3114 },
3115 });
3116 }
3117 (Style::Int(Sign::Signed), Style::Float) => {
3118 return Ok(bx.sitofp(args[0].immediate(), llret_ty));
3119 }
3120 (Style::Int(Sign::Unsigned), Style::Float) => {
3121 return Ok(bx.uitofp(args[0].immediate(), llret_ty));
3122 }
3123 (Style::Float, Style::Int(sign)) => {
3124 return Ok(match (sign, name == sym::simd_as) {
3125 (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
3126 (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
3127 (_, true) => bx.cast_float_to_int(
3128 #[allow(non_exhaustive_omitted_patterns)] match sign {
Sign::Signed => true,
_ => false,
}matches!(sign, Sign::Signed),
3129 args[0].immediate(),
3130 llret_ty,
3131 ),
3132 });
3133 }
3134 (Style::Float, Style::Float) => {
3135 return Ok(match in_width.cmp(&out_width) {
3136 Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
3137 Ordering::Equal => args[0].immediate(),
3138 Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
3139 });
3140 }
3141 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedCast {
span,
name,
in_ty,
in_elem,
ret_ty,
out_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::UnsupportedCast {
3142 span,
3143 name,
3144 in_ty,
3145 in_elem,
3146 ret_ty,
3147 out_elem
3148 }),
3149 }
3150 }
3151 macro_rules! arith_binary {
3152 ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
3153 $(if name == sym::$name {
3154 match in_elem.kind() {
3155 $($(ty::$p(_))|* => {
3156 return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
3157 })*
3158 _ => {},
3159 }
3160 return_error!(
3161 InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
3162 );
3163 })*
3164 }
3165 }
3166 if name == sym::simd_minimum_number_nsz {
match in_elem.kind() {
ty::Float(_) => {
return Ok(bx.minimum_number_nsz(args[0].immediate(),
args[1].immediate()))
}
_ => {}
}
{
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
};
}arith_binary! {
3167 simd_add: Uint, Int => add, Float => fadd;
3168 simd_sub: Uint, Int => sub, Float => fsub;
3169 simd_mul: Uint, Int => mul, Float => fmul;
3170 simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
3171 simd_rem: Uint => urem, Int => srem, Float => frem;
3172 simd_shl: Uint, Int => shl;
3173 simd_shr: Uint => lshr, Int => ashr;
3174 simd_and: Uint, Int => and;
3175 simd_or: Uint, Int => or;
3176 simd_xor: Uint, Int => xor;
3177 simd_maximum_number_nsz: Float => maximum_number_nsz;
3178 simd_minimum_number_nsz: Float => minimum_number_nsz;
3179
3180 }
3181 macro_rules! arith_unary {
3182 ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
3183 $(if name == sym::$name {
3184 match in_elem.kind() {
3185 $($(ty::$p(_))|* => {
3186 return Ok(bx.$call(args[0].immediate()))
3187 })*
3188 _ => {},
3189 }
3190 return_error!(
3191 InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
3192 );
3193 })*
3194 }
3195 }
3196 if name == sym::simd_neg {
match in_elem.kind() {
ty::Int(_) => { return Ok(bx.neg(args[0].immediate())) }
ty::Float(_) => { return Ok(bx.fneg(args[0].immediate())) }
_ => {}
}
{
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
};
}arith_unary! {
3197 simd_neg: Int => neg, Float => fneg;
3198 }
3199
3200 if #[allow(non_exhaustive_omitted_patterns)] match name {
sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_ctpop
| sym::simd_cttz | sym::simd_carryless_mul | sym::simd_funnel_shl |
sym::simd_funnel_shr => true,
_ => false,
}matches!(
3202 name,
3203 sym::simd_bswap
3204 | sym::simd_bitreverse
3205 | sym::simd_ctlz
3206 | sym::simd_ctpop
3207 | sym::simd_cttz
3208 | sym::simd_carryless_mul
3209 | sym::simd_funnel_shl
3210 | sym::simd_funnel_shr
3211 ) {
3212 let vec_ty = bx.cx.type_vector(
3213 match *in_elem.kind() {
3214 ty::Int(i) => bx.cx.type_int_from_ty(i),
3215 ty::Uint(i) => bx.cx.type_uint_from_ty(i),
3216 _ => {
bx.sess().dcx().emit_err(InvalidMonomorphization::UnsupportedOperation {
span,
name,
in_ty,
in_elem,
});
return Err(());
}return_error!(InvalidMonomorphization::UnsupportedOperation {
3217 span,
3218 name,
3219 in_ty,
3220 in_elem
3221 }),
3222 },
3223 in_len as u64,
3224 );
3225 let llvm_intrinsic = match name {
3226 sym::simd_bswap => "llvm.bswap",
3227 sym::simd_bitreverse => "llvm.bitreverse",
3228 sym::simd_ctlz => "llvm.ctlz",
3229 sym::simd_ctpop => "llvm.ctpop",
3230 sym::simd_cttz => "llvm.cttz",
3231 sym::simd_funnel_shl => "llvm.fshl",
3232 sym::simd_funnel_shr => "llvm.fshr",
3233 sym::simd_carryless_mul => "llvm.clmul",
3234 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
3235 };
3236 let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
3237
3238 return match name {
3239 sym::simd_bswap if int_size == 8 => Ok(args[0].immediate()),
3241 sym::simd_ctlz | sym::simd_cttz => {
3242 let dont_poison_on_zero = bx.const_int(bx.type_i1(), 0);
3244 Ok(bx.call_intrinsic(
3245 llvm_intrinsic,
3246 &[vec_ty],
3247 &[args[0].immediate(), dont_poison_on_zero],
3248 ))
3249 }
3250 sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctpop => {
3251 Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[args[0].immediate()]))
3253 }
3254 sym::simd_funnel_shl | sym::simd_funnel_shr => Ok(bx.call_intrinsic(
3255 llvm_intrinsic,
3256 &[vec_ty],
3257 &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
3258 )),
3259 sym::simd_carryless_mul => {
3260 if crate::llvm_util::get_version() >= (22, 0, 0) {
3261 Ok(bx.call_intrinsic(
3262 llvm_intrinsic,
3263 &[vec_ty],
3264 &[args[0].immediate(), args[1].immediate()],
3265 ))
3266 } else {
3267 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("`simd_carryless_mul` needs LLVM 22 or higher"));span_bug!(span, "`simd_carryless_mul` needs LLVM 22 or higher");
3268 }
3269 }
3270 _ => ::core::panicking::panic("internal error: entered unreachable code")unreachable!(),
3271 };
3272 }
3273
3274 if name == sym::simd_arith_offset {
3275 let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
3277 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("must be called with a vector of pointer types as first argument"))span_bug!(span, "must be called with a vector of pointer types as first argument")
3278 });
3279 let layout = bx.layout_of(pointee);
3280 let ptrs = args[0].immediate();
3281 let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
3284 if !#[allow(non_exhaustive_omitted_patterns)] match offsets_elem.kind() {
ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize) => true,
_ => false,
}matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
3285 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("must be called with a vector of pointer-sized integers as second argument"));span_bug!(
3286 span,
3287 "must be called with a vector of pointer-sized integers as second argument"
3288 );
3289 }
3290 let offsets = args[1].immediate();
3291
3292 return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
3293 }
3294
3295 if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
3296 let lhs = args[0].immediate();
3297 let rhs = args[1].immediate();
3298 let is_add = name == sym::simd_saturating_add;
3299 let (signed, elem_ty) = match *in_elem.kind() {
3300 ty::Int(i) => (true, bx.cx.type_int_from_ty(i)),
3301 ty::Uint(i) => (false, bx.cx.type_uint_from_ty(i)),
3302 _ => {
3303 {
bx.sess().dcx().emit_err(InvalidMonomorphization::ExpectedVectorElementType {
span,
name,
expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
vector_type: args[0].layout.ty,
});
return Err(());
};return_error!(InvalidMonomorphization::ExpectedVectorElementType {
3304 span,
3305 name,
3306 expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
3307 vector_type: args[0].layout.ty
3308 });
3309 }
3310 };
3311 let llvm_intrinsic = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!("llvm.{0}{1}.sat",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" }))
})format!(
3312 "llvm.{}{}.sat",
3313 if signed { 's' } else { 'u' },
3314 if is_add { "add" } else { "sub" },
3315 );
3316 let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
3317
3318 return Ok(bx.call_intrinsic(llvm_intrinsic, &[vec_ty], &[lhs, rhs]));
3319 }
3320
3321 ::rustc_middle::util::bug::span_bug_fmt(span,
format_args!("unknown SIMD intrinsic"));span_bug!(span, "unknown SIMD intrinsic");
3322}