use rustc_abi::{Align, WrappingRange};
use rustc_middle::mir::SourceInfo;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, span_bug};
use rustc_session::config::OptLevel;
use rustc_span::sym;
use rustc_target::spec::Arch;

use super::FunctionCx;
use super::operand::OperandRef;
use super::place::PlaceRef;
use crate::common::{AtomicRmwBinOp, SynchronizationScope};
use crate::errors::InvalidMonomorphization;
use crate::traits::*;
use crate::{MemFlags, meth, size_of_val};

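/// Shared lowering for the `copy`/`copy_nonoverlapping` and `volatile_copy_*`
/// intrinsics: scales `count` by the size of `ty` and emits a `memmove` (overlap
/// allowed) or `memcpy` (no overlap), passing `MemFlags::VOLATILE` when requested.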
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags, None);
    }
}

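/// Shared lowering for `write_bytes` and `volatile_set_memory`: emits a `memset`
/// over `count * size_of::<T>()` bytes at the type's ABI alignment.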
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
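    /// Codegens a call to the intrinsic `instance`, storing any produced value into
    /// `result`. In the `Err(instance)` case the intrinsic was not handled here and
    /// the caller is expected to emit a plain call to the intrinsic's fallback body.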
    pub fn codegen_intrinsic_call(
        &mut self,
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        args: &[OperandRef<'tcx, Bx::Value>],
        result: PlaceRef<'tcx, Bx::Value>,
        source_info: SourceInfo,
    ) -> Result<(), ty::Instance<'tcx>> {
        let span = source_info.span;

        let name = bx.tcx().item_name(instance.def_id());
        let fn_args = instance.args;

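        // Swap `typed_swap_nonoverlapping` operands in place when the pointee is an
        // immediate or pair in this backend, when not optimizing, or on SPIR-V;
        // otherwise fall through so the intrinsic's fallback body is used instead.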
        if let sym::typed_swap_nonoverlapping = name {
            let pointee_ty = fn_args.type_at(0);
            let pointee_layout = bx.layout_of(pointee_ty);
            if !bx.is_backend_ref(pointee_layout)
                || bx.sess().opts.optimize == OptLevel::No
                || bx.sess().target.arch == Arch::SpirV
            {
                let align = pointee_layout.align.abi;
                let x_place = args[0].val.deref(align);
                let y_place = args[1].val.deref(align);
                bx.typed_place_swap(x_place, y_place, pointee_layout);
                return Ok(());
            }
        }

        let invalid_monomorphization_int_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
        };
        let invalid_monomorphization_int_or_ptr_type = |ty| {
            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
                span,
                name,
                ty,
            });
        };

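        // Atomic intrinsics receive their memory ordering as a const generic argument;
        // pull the enum discriminant out of its valtree representation.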
        let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
            let discr = ord.to_branch()[0].to_leaf();
            discr.to_atomic_ordering()
        };

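        // Intrinsics that take no arguments are normally required to be called in a
        // const block and evaluated before codegen; the names listed below are the
        // runtime exceptions.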
        if args.is_empty() {
            match name {
                sym::abort
                | sym::unreachable
                | sym::cold_path
                | sym::gpu_launch_sized_workgroup_mem
                | sym::breakpoint
                | sym::amdgpu_dispatch_ptr
                | sym::assert_zero_valid
                | sym::assert_mem_uninitialized_valid
                | sym::assert_inhabited
                | sym::ub_checks
                | sym::contract_checks
                | sym::atomic_fence
                | sym::atomic_singlethreadfence
                | sym::caller_location => {}
                _ => {
                    span_bug!(
                        span,
                        "Nullary intrinsic {name} must be called in a const block. \
                        If you are seeing this message from code outside the standard library, the \
                        unstable implementation details of the relevant intrinsic may have changed. \
                        Consider using stable APIs instead. \
                        If you are adding a new nullary intrinsic that is inherently a runtime \
                        intrinsic, update this check."
                    );
                }
            }
        }

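        // Each arm either returns early (after storing its own result, or because it
        // produces none) or evaluates to an immediate that is written to `result` at
        // the end of this function.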
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::caller_location => {
                let location = self.get_caller_location(bx, source_info);
                location.val.store(bx, result);
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),

            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let (_, meta) = args[0].val.pointer_parts();
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(
                    bx,
                    vtable,
                    instance.ty(bx.tcx(), bx.typing_env()),
                );
                match name {
                    sym::vtable_size => {
                        // A valid size is at most `isize::MAX`.
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    sym::vtable_align => {
                        // A valid alignment is never zero.
                        let align_bound = Align::max_for_target(bx.data_layout()).bytes().into();
                        bx.range_metadata(value, WrappingRange { start: 1, end: align_bound })
                    }
                    _ => {}
                }
                value
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // Note: the intrinsic takes `(src, dst, count)`, while `copy_intrinsic`
                // takes `dst` before `src`, hence the swapped argument order.
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
            sym::disjoint_bitor => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.or_disjoint(a, b)
            }
            sym::exact_div => {
                let ty = args[0].layout.ty;
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(args[0].layout.ty) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
            }

            sym::atomic_load => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let layout = bx.layout_of(ty);
                let source = args[0].immediate();
                bx.atomic_load(
                    bx.backend_type(layout),
                    source,
                    parse_atomic_ordering(ordering),
                    layout.size,
                )
            }
            sym::atomic_store => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let ordering = fn_args.const_at(1).to_value();
                let size = bx.layout_of(ty).size;
                let val = args[1].immediate();
                let ptr = args[0].immediate();
                bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                return Ok(());
            }
            sym::atomic_cxchg | sym::atomic_cxchgweak => {
                let ty = fn_args.type_at(0);
                if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
                let succ_ordering = fn_args.const_at(1).to_value();
                let fail_ordering = fn_args.const_at(2).to_value();
                let weak = name == sym::atomic_cxchgweak;
                let dst = args[0].immediate();
                let cmp = args[1].immediate();
                let src = args[2].immediate();
                let (val, success) = bx.atomic_cmpxchg(
                    dst,
                    cmp,
                    src,
                    parse_atomic_ordering(succ_ordering),
                    parse_atomic_ordering(fail_ordering),
                    weak,
                );
                let val = bx.from_immediate(val);
                let success = bx.from_immediate(success);

                let dest = result.project_field(bx, 0);
                bx.store_to_place(val, dest.val);
                let dest = result.project_field(bx, 1);
                bx.store_to_place(success, dest.val);

                return Ok(());
            }
            sym::atomic_max | sym::atomic_min => {
                let atom_op = if name == sym::atomic_max {
                    AtomicRmwBinOp::AtomicMax
                } else {
                    AtomicRmwBinOp::AtomicMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Int(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_umax | sym::atomic_umin => {
                let atom_op = if name == sym::atomic_umax {
                    AtomicRmwBinOp::AtomicUMax
                } else {
                    AtomicRmwBinOp::AtomicUMin
                };

                let ty = fn_args.type_at(0);
                if matches!(ty.kind(), ty::Uint(_)) {
                    let ordering = fn_args.const_at(1).to_value();
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        false,
                    )
                } else {
                    invalid_monomorphization_int_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xchg => {
                let ty = fn_args.type_at(0);
                let ordering = fn_args.const_at(1).to_value();
                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
                    bx.atomic_rmw(
                        atomic_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty);
                    return Ok(());
                }
            }
            sym::atomic_xadd
            | sym::atomic_xsub
            | sym::atomic_and
            | sym::atomic_nand
            | sym::atomic_or
            | sym::atomic_xor => {
                let atom_op = match name {
                    sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                    sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                    sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
                    sym::atomic_nand => AtomicRmwBinOp::AtomicNand,
                    sym::atomic_or => AtomicRmwBinOp::AtomicOr,
                    sym::atomic_xor => AtomicRmwBinOp::AtomicXor,
                    _ => unreachable!(),
                };

                let ty_mem = fn_args.type_at(0);
                let ty_op = fn_args.type_at(1);

                let ordering = fn_args.const_at(2).to_value();
                // Allowed on integers, and also on raw pointers when the operand is a
                // `usize` (as used by `AtomicPtr`'s byte operations).
                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
                {
                    let ptr = args[0].immediate();
                    let val = args[1].immediate();
                    bx.atomic_rmw(
                        atom_op,
                        ptr,
                        val,
                        parse_atomic_ordering(ordering),
                        ty_mem.is_raw_ptr(),
                    )
                } else {
                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                    return Ok(());
                }
            }
            sym::atomic_fence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(parse_atomic_ordering(ordering), SynchronizationScope::CrossThread);
                return Ok(());
            }

            sym::atomic_singlethreadfence => {
                let ordering = fn_args.const_at(0).to_value();
                bx.atomic_fence(
                    parse_atomic_ordering(ordering),
                    SynchronizationScope::SingleThread,
                );
                return Ok(());
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

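                // Compute the byte difference with plain integer arithmetic, then
                // divide out the pointee size. The exact-division builders let the
                // backend assume the difference is a multiple of the size, which the
                // intrinsic's preconditions guarantee.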
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    let d = bx.sub(a, b);
                    bx.exactsdiv(d, pointee_size)
                } else {
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            sym::cold_path => {
                // This is a no-op; the intrinsic only serves as a hint to the optimizer.
                return Ok(());
            }

            _ => {
                // Everything else is delegated to the backend, which may itself fall
                // back to the intrinsic's body by returning `Err(instance)`.
                return bx.codegen_intrinsic_call(instance, args, result, span);
            }
        };

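        // Immediates and in-memory values differ for `bool` (e.g. `i1` vs. `i8` in the
        // LLVM backend), so go through `from_immediate` before storing; `()` results
        // need no store at all.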
        if result.layout.ty.is_bool() {
            let val = bx.from_immediate(llval);
            bx.store_to_place(val, result.val);
        } else if !result.layout.ty.is_unit() {
            bx.store_to_place(llval, result.val);
        }
        Ok(())
    }
}

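/// Returns the width in bits of `ty` and whether it is signed, if it is a primitive
/// integer type. `isize` and `usize` report the target's pointer width.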
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

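/// Returns the width in bits of `ty` if it is a primitive floating-point type.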
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}