//! Inlining pass for MIR functions.
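//!
//! This pass replaces direct `Call` terminators in a caller's MIR body with a copy of the
//! callee's body, remapping the callee's locals, source scopes, and basic blocks into the
//! caller (see [`Integrator`]). There are two entry points: [`Inline`], the heuristic,
//! cost-based pass, and [`ForceInline`], which inlines callees whose `InlineAttr::Force`
//! attribute requires it and emits an error when that is impossible.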

use std::assert_matches::debug_assert_matches;
use std::iter;
use std::ops::{Range, RangeFrom};

use rustc_abi::{ExternAbi, FieldIdx};
use rustc_hir::attrs::{InlineAttr, OptimizeAttr};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::Idx;
use rustc_index::bit_set::DenseBitSet;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{
    self, Instance, InstanceKind, Ty, TyCtxt, TypeFlags, TypeVisitableExt,
};
use rustc_session::config::{DebugInfo, OptLevel};
use rustc_span::Spanned;
use tracing::{debug, instrument, trace, trace_span};

use crate::cost_checker::{CostChecker, is_call_like};
use crate::simplify::{UsedInStmtLocals, simplify_cfg};
use crate::validate::validate_types;
use crate::{check_inline, util};

pub(crate) mod cycle;

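// These two limits keep inlining from looping or blowing up MIR size: `HISTORY_DEPTH_LIMIT`
// bounds the stack of transitively inlined callees, and `TOP_DOWN_DEPTH_LIMIT` bounds how
// many multi-call callsites get inlined for a single top-level call (see
// `NormalInliner::past_depth_limit`).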
const HISTORY_DEPTH_LIMIT: usize = 20;
const TOP_DOWN_DEPTH_LIMIT: usize = 5;

#[derive(Clone, Debug)]
struct CallSite<'tcx> {
    /// The resolved instance being called.
    callee: Instance<'tcx>,
    /// The callee signature, instantiated with the callsite's generic arguments.
    fn_sig: ty::PolyFnSig<'tcx>,
    /// The caller block whose terminator is this call.
    block: BasicBlock,
    /// Source info for the call, using the call's `fn_span`.
    source_info: SourceInfo,
}

// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden
// by custom rustc drivers, running all the steps by themselves. See #114628.
pub struct Inline;

impl<'tcx> crate::MirPass<'tcx> for Inline {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
            return enabled;
        }

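        // Without an explicit `-Zinline-mir` override: never inline at mir-opt-level 0/1,
        // inline at level 2 only for non-incremental `-O2`/`-O3` builds, and always inline
        // at level 3 and above.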
        match sess.mir_opt_level() {
            0 | 1 => false,
            2 => {
                (sess.opts.optimize == OptLevel::More || sess.opts.optimize == OptLevel::Aggressive)
                    && sess.opts.incremental == None
            }
            _ => true,
        }
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline::<NormalInliner<'tcx>>(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            simplify_cfg(tcx, body);
        }
    }

    fn is_required(&self) -> bool {
        false
    }
}

pub struct ForceInline;

impl ForceInline {
    pub fn should_run_pass_for_callee<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
        matches!(tcx.codegen_fn_attrs(def_id).inline, InlineAttr::Force { .. })
    }
}

impl<'tcx> crate::MirPass<'tcx> for ForceInline {
    fn is_enabled(&self, _: &rustc_session::Session) -> bool {
        true
    }

    fn can_be_overridden(&self) -> bool {
        false
    }

    fn is_required(&self) -> bool {
        true
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("force_inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline::<ForceInliner<'tcx>>(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            simplify_cfg(tcx, body);
        }
    }
}

trait Inliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self;

    fn tcx(&self) -> TyCtxt<'tcx>;
    fn typing_env(&self) -> ty::TypingEnv<'tcx>;
    /// Stack of `DefId`s of callees currently being inlined, used to break recursion.
    fn history(&self) -> &[DefId];
    fn caller_def_id(&self) -> DefId;

    /// Has the caller body been changed?
    fn changed(self) -> bool;

    /// Should inlining happen for a given callee?
    fn should_inline_for_callee(&self, def_id: DefId) -> bool;

    /// Performs any inliner-specific checks on the callee's codegen attributes, beyond
    /// the common checks in `check_codegen_attributes`.
    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str>;

    /// Returns whether inlining into this caller body is possible at all.
    fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool;

    /// Returns the inlining decision based on an examination of the callee's MIR body.
    /// Assumes that codegen attributes have already been checked for compatibility.
    fn check_callee_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str>;

    /// Called when inlining succeeds.
    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    );

    /// Called when inlining failed or was not performed.
    fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str);
}

struct ForceInliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    /// `DefId` of caller.
    def_id: DefId,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the args because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
    history: Vec<DefId>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl<'tcx> Inliner<'tcx> for ForceInliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self {
        Self { tcx, typing_env: body.typing_env(tcx), def_id, history: Vec::new(), changed: false }
    }

    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.typing_env
    }

    fn history(&self) -> &[DefId] {
        &self.history
    }

    fn caller_def_id(&self) -> DefId {
        self.def_id
    }

    fn changed(self) -> bool {
        self.changed
    }

    fn should_inline_for_callee(&self, def_id: DefId) -> bool {
        ForceInline::should_run_pass_for_callee(self.tcx(), def_id)
    }

    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        debug_assert_matches!(callee_attrs.inline, InlineAttr::Force { .. });
        Ok(())
    }

    fn check_caller_mir_body(&self, _: &Body<'tcx>) -> bool {
        true
    }

    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_callee_mir_body(
        &self,
        _: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if callee_body.tainted_by_errors.is_some() {
            return Err("body has errors");
        }

        let caller_attrs = self.tcx().codegen_fn_attrs(self.caller_def_id());
        if callee_attrs.instruction_set != caller_attrs.instruction_set
            && callee_body
                .basic_blocks
                .iter()
                .any(|bb| matches!(bb.terminator().kind, TerminatorKind::InlineAsm { .. }))
        {
            // During the attribute checking stage we allow a callee with no
            // instruction_set assigned to count as compatible with a function that does
            // assign one. However, during this stage we require an exact match when any
            // inline-asm is detected. LLVM will still possibly do an inline later on
            // if the no-attribute function ends up with the same instruction set anyway.
            Err("cannot move inline-asm across instruction sets")
        } else {
            Ok(())
        }
    }

    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    ) {
        self.changed = true;

        self.history.push(callsite.callee.def_id());
        process_blocks(self, caller_body, new_blocks);
        self.history.pop();
    }

    fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str) {
        let tcx = self.tcx();
        let InlineAttr::Force { attr_span, reason: justification } =
            tcx.codegen_instance_attrs(callsite.callee.def).inline
        else {
            bug!("called on item without required inlining");
        };

        let call_span = callsite.source_info.span;
        let callee = tcx.def_path_str(callsite.callee.def_id());
        tcx.dcx().emit_err(crate::errors::ForceInlineFailure {
            call_span,
            attr_span,
            caller_span: tcx.def_span(self.def_id),
            caller: tcx.def_path_str(self.def_id),
            callee_span: tcx.def_span(callsite.callee.def_id()),
            callee: callee.clone(),
            reason,
            justification: justification
                .map(|sym| crate::errors::ForceInlineJustification { sym, callee }),
        });
    }
}

struct NormalInliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    /// `DefId` of caller.
    def_id: DefId,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the args because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
    history: Vec<DefId>,
    /// How many (multi-call) callsites have we inlined for the top-level call?
    ///
    /// We need to limit this in order to prevent super-linear growth in MIR size.
    top_down_counter: usize,
    /// Indicates that the caller body has been modified.
    changed: bool,
    /// Indicates that the caller is `#[inline]` and just calls another function,
    /// and thus we can inline less into it as it'll be inlined itself.
    caller_is_inline_forwarder: bool,
}

impl<'tcx> NormalInliner<'tcx> {
    fn past_depth_limit(&self) -> bool {
        self.history.len() > HISTORY_DEPTH_LIMIT || self.top_down_counter > TOP_DOWN_DEPTH_LIMIT
    }
}

impl<'tcx> Inliner<'tcx> for NormalInliner<'tcx> {
    fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self {
        let typing_env = body.typing_env(tcx);
        let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);

        Self {
            tcx,
            typing_env,
            def_id,
            history: Vec::new(),
            top_down_counter: 0,
            changed: false,
            caller_is_inline_forwarder: matches!(
                codegen_fn_attrs.inline,
                InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. }
            ) && body_is_forwarder(body),
        }
    }

    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn caller_def_id(&self) -> DefId {
        self.def_id
    }

    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.typing_env
    }

    fn history(&self) -> &[DefId] {
        &self.history
    }

    fn changed(self) -> bool {
        self.changed
    }

    fn should_inline_for_callee(&self, _: DefId) -> bool {
        true
    }

    fn check_codegen_attributes_extra(
        &self,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if self.past_depth_limit() && matches!(callee_attrs.inline, InlineAttr::None) {
            Err("Past depth limit so not inspecting unmarked callee")
        } else {
            Ok(())
        }
    }

    fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool {
        // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation,
        // which can create a cycle, even when no attempt is made to inline the function in the other
        // direction.
        if body.coroutine.is_some() {
            return false;
        }

        true
    }

    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_callee_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx();

        if callee_body.tainted_by_errors.is_some() {
            return Err("body has errors");
        }

        if self.past_depth_limit() && callee_body.basic_blocks.len() > 1 {
            return Err("Not inlining multi-block body as we're past a depth limit");
        }

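        // Pick the cost budget: forwarders and callsites past the depth limit get the
        // smallest one, cross-crate-inlinable callees a larger one.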
        let mut threshold = if self.caller_is_inline_forwarder || self.past_depth_limit() {
            tcx.sess.opts.unstable_opts.inline_mir_forwarder_threshold.unwrap_or(30)
        } else if tcx.cross_crate_inlinable(callsite.callee.def_id()) {
            tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks; we normally have
        // two or three blocks for even very small functions.
        if callee_body.basic_blocks.len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller

        let mut checker =
            CostChecker::new(tcx, self.typing_env(), Some(callsite.callee), callee_body);

        checker.add_function_level_costs();

        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = DenseBitSet::new_empty(callee_body.basic_blocks.len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }

            let blk = &callee_body.basic_blocks[bb];
            checker.visit_basic_block_data(bb, blk);

            let term = blk.terminator();
            let caller_attrs = tcx.codegen_fn_attrs(self.caller_def_id());
            if let TerminatorKind::Drop {
                ref place,
                target,
                unwind,
                replace: _,
                drop: _,
                async_fut: _,
            } = term.kind
            {
                work_list.push(target);

                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = callsite
                    .callee
                    .instantiate_mir(tcx, ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty));
                if ty.needs_drop(tcx, self.typing_env())
                    && let UnwindAction::Cleanup(unwind) = unwind
                {
                    work_list.push(unwind);
                }
            } else if callee_attrs.instruction_set != caller_attrs.instruction_set
                && matches!(term.kind, TerminatorKind::InlineAsm { .. })
            {
                // During the attribute checking stage we allow a callee with no
                // instruction_set assigned to count as compatible with a function that does
                // assign one. However, during this stage we require an exact match when any
                // inline-asm is detected. LLVM will still possibly do an inline later on
                // if the no-attribute function ends up with the same instruction set anyway.
                return Err("cannot move inline-asm across instruction sets");
            } else if let TerminatorKind::TailCall { .. } = term.kind {
                // FIXME(explicit_tail_calls): figure out how exactly functions containing tail
                // calls can be inlined (and if they even should)
                return Err("can't inline functions with tail calls");
            } else {
                work_list.extend(term.successors())
            }
        }

        // N.B. We still apply our cost threshold to #[inline(always)] functions.
        // That attribute is often applied to very large functions that exceed LLVM's (very
        // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
        // Always inlining #[inline(always)] functions in MIR, on net, slows down the compiler.
        let cost = checker.cost();
        if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

    fn on_inline_success(
        &mut self,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        new_blocks: std::ops::Range<BasicBlock>,
    ) {
        self.changed = true;

        let new_calls_count = new_blocks
            .clone()
            .filter(|&bb| is_call_like(caller_body.basic_blocks[bb].terminator()))
            .count();
        if new_calls_count > 1 {
            self.top_down_counter += 1;
        }

        self.history.push(callsite.callee.def_id());
        process_blocks(self, caller_body, new_blocks);
        self.history.pop();

        if self.history.is_empty() {
            self.top_down_counter = 0;
        }
    }

    fn on_inline_failure(&self, _: &CallSite<'tcx>, _: &'static str) {}
}

fn inline<'tcx, T: Inliner<'tcx>>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id();

    // Only do inlining into fn bodies.
    if !tcx.hir_body_owner_kind(def_id).is_fn_or_closure() {
        return false;
    }

    let mut inliner = T::new(tcx, def_id, body);
    if !inliner.check_caller_mir_body(body) {
        return false;
    }

    let blocks = START_BLOCK..body.basic_blocks.next_index();
    process_blocks(&mut inliner, body, blocks);
    inliner.changed()
}

fn process_blocks<'tcx, I: Inliner<'tcx>>(
    inliner: &mut I,
    caller_body: &mut Body<'tcx>,
    blocks: Range<BasicBlock>,
) {
    for bb in blocks {
        let bb_data = &caller_body[bb];
        if bb_data.is_cleanup {
            continue;
        }

        let Some(callsite) = resolve_callsite(inliner, caller_body, bb, bb_data) else {
            continue;
        };

        let span = trace_span!("process_blocks", %callsite.callee, ?bb);
        let _guard = span.enter();

        match try_inlining(inliner, caller_body, &callsite) {
            Err(reason) => {
                debug!("not-inlined {} [{}]", callsite.callee, reason);
                inliner.on_inline_failure(&callsite, reason);
            }
            Ok(new_blocks) => {
                debug!("inlined {}", callsite.callee);
                inliner.on_inline_success(&callsite, caller_body, new_blocks);
            }
        }
    }
}

fn resolve_callsite<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &Body<'tcx>,
    bb: BasicBlock,
    bb_data: &BasicBlockData<'tcx>,
) -> Option<CallSite<'tcx>> {
    let tcx = inliner.tcx();
    // Only consider direct calls to functions
    let terminator = bb_data.terminator();

    // FIXME(explicit_tail_calls): figure out if we can inline tail calls
    if let TerminatorKind::Call { ref func, fn_span, .. } = terminator.kind {
        let func_ty = func.ty(caller_body, tcx);
        if let ty::FnDef(def_id, args) = *func_ty.kind() {
            if !inliner.should_inline_for_callee(def_id) {
                debug!("not enabled");
                return None;
            }

            // To resolve an instance its args have to be fully normalized.
            let args = tcx.try_normalize_erasing_regions(inliner.typing_env(), args).ok()?;
            let callee =
                Instance::try_resolve(tcx, inliner.typing_env(), def_id, args).ok().flatten()?;

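            // Virtual calls and intrinsics have no MIR body we could inline.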
            if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def {
                return None;
            }

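            // Avoid infinite recursion: bail if we're already in the middle of inlining
            // this callee somewhere up the inlining stack.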
            if inliner.history().contains(&callee.def_id()) {
                return None;
            }

            let fn_sig = tcx.fn_sig(def_id).instantiate(tcx, args);

            // Additionally, check that the body that we're inlining actually agrees
            // with the ABI of the trait that the item comes from.
            if let InstanceKind::Item(instance_def_id) = callee.def
                && tcx.def_kind(instance_def_id) == DefKind::AssocFn
                && let instance_fn_sig = tcx.fn_sig(instance_def_id).skip_binder()
                && instance_fn_sig.abi() != fn_sig.abi()
            {
                return None;
            }

            let source_info = SourceInfo { span: fn_span, ..terminator.source_info };

            return Some(CallSite { callee, fn_sig, block: bb, source_info });
        }
    }

    None
}

/// Attempts to inline a callsite into the caller body. When successful returns basic blocks
/// containing the inlined body. Otherwise returns an error describing why inlining didn't take
/// place.
fn try_inlining<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
) -> Result<std::ops::Range<BasicBlock>, &'static str> {
    let tcx = inliner.tcx();
    check_mir_is_available(inliner, caller_body, callsite.callee)?;

    let callee_attrs = tcx.codegen_instance_attrs(callsite.callee.def);
    let callee_attrs = callee_attrs.as_ref();
    check_inline::is_inline_valid_on_fn(tcx, callsite.callee.def_id())?;
    check_codegen_attributes(inliner, callsite, callee_attrs)?;

    let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
    let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
    let destination_ty = destination.ty(&caller_body.local_decls, tcx).ty;
    for arg in args {
        if !arg.node.ty(&caller_body.local_decls, tcx).is_sized(tcx, inliner.typing_env()) {
            // We do not allow inlining functions with unsized params. Inlining these functions
            // could create unsized locals, which are unsound and being phased out.
            return Err("call has unsized argument");
        }
    }

    let callee_body = try_instance_mir(tcx, callsite.callee.def)?;
    check_inline::is_inline_valid_on_body(tcx, callee_body)?;
    inliner.check_callee_mir_body(callsite, callee_body, callee_attrs)?;

    let Ok(callee_body) = callsite.callee.try_instantiate_mir_and_normalize_erasing_regions(
        tcx,
        inliner.typing_env(),
        ty::EarlyBinder::bind(callee_body.clone()),
    ) else {
        debug!("failed to normalize callee body");
        return Err("implementation limitation -- could not normalize callee body");
    };

    // Normally, this shouldn't be required, but trait normalization failure can create a
    // validation ICE.
    if !validate_types(tcx, inliner.typing_env(), &callee_body, caller_body).is_empty() {
        debug!("failed to validate callee body");
        return Err("implementation limitation -- callee body failed validation");
    }

    // Check call signature compatibility.
    // Normally, this shouldn't be required, but trait normalization failure can create a
    // validation ICE.
    let output_type = callee_body.return_ty();
    if !util::sub_types(tcx, inliner.typing_env(), output_type, destination_ty) {
        trace!(?output_type, ?destination_ty);
        return Err("implementation limitation -- return type mismatch");
    }
    if callsite.fn_sig.abi() == ExternAbi::RustCall {
        let (self_arg, arg_tuple) = match &args[..] {
            [arg_tuple] => (None, arg_tuple),
            [self_arg, arg_tuple] => (Some(self_arg), arg_tuple),
            _ => bug!("Expected `rust-call` to have 1 or 2 args"),
        };

        let self_arg_ty = self_arg.map(|self_arg| self_arg.node.ty(&caller_body.local_decls, tcx));

        let arg_tuple_ty = arg_tuple.node.ty(&caller_body.local_decls, tcx);
        let arg_tys = if callee_body.spread_arg.is_some() {
            std::slice::from_ref(&arg_tuple_ty)
        } else {
            let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };
            arg_tuple_tys.as_slice()
        };

        for (arg_ty, input) in
            self_arg_ty.into_iter().chain(arg_tys.iter().copied()).zip(callee_body.args_iter())
        {
            let input_type = callee_body.local_decls[input].ty;
            if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) {
                trace!(?arg_ty, ?input_type);
                debug!("failed to normalize tuple argument type");
                return Err("implementation limitation");
            }
        }
    } else {
        for (arg, input) in args.iter().zip(callee_body.args_iter()) {
            let input_type = callee_body.local_decls[input].ty;
            let arg_ty = arg.node.ty(&caller_body.local_decls, tcx);
            if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) {
                trace!(?arg_ty, ?input_type);
                debug!("failed to normalize argument type");
                return Err("implementation limitation -- arg mismatch");
            }
        }
    }

    let old_blocks = caller_body.basic_blocks.next_index();
    inline_call(inliner, caller_body, callsite, callee_body);
    let new_blocks = old_blocks..caller_body.basic_blocks.next_index();

    Ok(new_blocks)
}

fn check_mir_is_available<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &Body<'tcx>,
    callee: Instance<'tcx>,
) -> Result<(), &'static str> {
    let caller_def_id = caller_body.source.def_id();
    let callee_def_id = callee.def_id();
    if callee_def_id == caller_def_id {
        return Err("self-recursion");
    }

    match callee.def {
        InstanceKind::Item(_) => {
            // If there is no MIR available (either because it was not in metadata or
            // because it has no MIR because it's an extern function), then the inliner
            // won't cause cycles on this.
            if !inliner.tcx().is_mir_available(callee_def_id) {
                debug!("item MIR unavailable");
                return Err("implementation limitation -- MIR unavailable");
            }
        }
        // These have no callable MIR of their own.
        InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => {
            debug!("instance without MIR (intrinsic / virtual)");
            return Err("implementation limitation -- cannot inline intrinsic");
        }

        // FIXME(#127030): `ConstParamHasTy` has bad interactions with
        // the drop shim builder, which does not evaluate predicates in
        // the correct param-env for types being dropped. Stall resolving
        // the MIR for this instance until all of its const params are
        // substituted.
        InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => {
            debug!("still needs substitution");
            return Err("implementation limitation -- HACK for dropping polymorphic type");
        }
        InstanceKind::AsyncDropGlue(_, ty) | InstanceKind::AsyncDropGlueCtorShim(_, ty) => {
            return if ty.still_further_specializable() {
                Err("still needs substitution")
            } else {
                Ok(())
            };
        }
        InstanceKind::FutureDropPollShim(_, ty, ty2) => {
            return if ty.still_further_specializable() || ty2.still_further_specializable() {
                Err("still needs substitution")
            } else {
                Ok(())
            };
        }

        // This cannot result in an immediate cycle since the callee MIR is a shim, which does
        // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
        // do not need to catch this here, we can wait until the inliner decides to continue
        // inlining a second time.
        InstanceKind::VTableShim(_)
        | InstanceKind::ReifyShim(..)
        | InstanceKind::FnPtrShim(..)
        | InstanceKind::ClosureOnceShim { .. }
        | InstanceKind::ConstructCoroutineInClosureShim { .. }
        | InstanceKind::DropGlue(..)
        | InstanceKind::CloneShim(..)
        | InstanceKind::ThreadLocalShim(..)
        | InstanceKind::FnPtrAddrShim(..) => return Ok(()),
    }

    if inliner.tcx().is_constructor(callee_def_id) {
        trace!("constructors always have MIR");
        // Constructor functions cannot cause a query cycle.
        return Ok(());
    }

    if let Some(callee_def_id) = callee_def_id.as_local()
        && !inliner
            .tcx()
            .is_lang_item(inliner.tcx().parent(caller_def_id), rustc_hir::LangItem::FnOnce)
    {
        // If we know for sure that the function we're calling will itself try to
        // call us, then we avoid inlining that function.
        let Some(cyclic_callees) = inliner.tcx().mir_callgraph_cyclic(caller_def_id.expect_local())
        else {
            return Err("call graph cycle detection bailed due to recursion limit");
        };
        if cyclic_callees.contains(&callee_def_id) {
            debug!("query cycle avoidance");
            return Err("caller might be reachable from callee");
        }

        Ok(())
    } else {
        // This cannot result in an immediate cycle since the callee MIR is from another crate
        // and is already optimized. Any subsequent inlining may cause cycles, but we do
        // not need to catch this here, we can wait until the inliner decides to continue
        // inlining a second time.
        trace!("functions from other crates always have MIR");
        Ok(())
    }
}

/// Returns an error if inlining is not possible based on codegen attributes alone. Success
/// indicates that the inlining decision should be based on other criteria.
fn check_codegen_attributes<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    callsite: &CallSite<'tcx>,
    callee_attrs: &CodegenFnAttrs,
) -> Result<(), &'static str> {
    let tcx = inliner.tcx();
    if let InlineAttr::Never = callee_attrs.inline {
        return Err("never inline attribute");
    }

    if let OptimizeAttr::DoNotOptimize = callee_attrs.optimize {
        return Err("has DoNotOptimize attribute");
    }

    inliner.check_codegen_attributes_extra(callee_attrs)?;

    // The reachability pass defines which functions are eligible for inlining. Generally,
    // inlining other functions is incorrect because they could reference symbols that
    // aren't exported.
    let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
    if !is_generic && !tcx.cross_crate_inlinable(callsite.callee.def_id()) {
        return Err("not exported");
    }

    let codegen_fn_attrs = tcx.codegen_fn_attrs(inliner.caller_def_id());
    if callee_attrs.sanitizers != codegen_fn_attrs.sanitizers {
        return Err("incompatible sanitizer set");
    }

    // Two functions are compatible if the callee has no attribute (meaning
    // that it's codegen agnostic), or sets an attribute that is identical
    // to this function's attribute.
    if callee_attrs.instruction_set.is_some()
        && callee_attrs.instruction_set != codegen_fn_attrs.instruction_set
    {
        return Err("incompatible instruction set");
    }

    let callee_feature_names = callee_attrs.target_features.iter().map(|f| f.name);
    let this_feature_names = codegen_fn_attrs.target_features.iter().map(|f| f.name);
    if callee_feature_names.ne(this_feature_names) {
        // In general it is not correct to inline a callee with target features that are a
        // subset of the caller. This is because the callee might contain calls, and the ABI of
        // those calls depends on the target features of the surrounding function. By moving a
        // `Call` terminator from one MIR body to another with more target features, we might
        // change the ABI of that call!
        return Err("incompatible target features");
    }

    Ok(())
}

fn inline_call<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
    mut callee_body: Body<'tcx>,
) {
    let tcx = inliner.tcx();
    let terminator = caller_body[callsite.block].terminator.take().unwrap();
    let TerminatorKind::Call { func, args, destination, unwind, target, .. } = terminator.kind
    else {
        bug!("unexpected terminator kind {:?}", terminator.kind);
    };

    let return_block = if let Some(block) = target {
        // Prepare a new block for code that should execute when call returns. We don't use
        // target block directly since it might have other predecessors.
        let data = BasicBlockData::new(
            Some(Terminator {
                source_info: terminator.source_info,
                kind: TerminatorKind::Goto { target: block },
            }),
            caller_body[block].is_cleanup,
        );
        Some(caller_body.basic_blocks_mut().push(data))
    } else {
        None
    };

    // If the call is something like `a[*i] = f(i)`, where
    // `i : &mut usize`, then just duplicating the `a[*i]`
    // Place could result in two different locations if `f`
    // writes to `i`. To prevent this we need to create a temporary
    // borrow of the place and pass the destination as `*temp` instead.
    fn dest_needs_borrow(place: Place<'_>) -> bool {
        for elem in place.projection.iter() {
            match elem {
                ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                _ => {}
            }
        }

        false
    }

    let dest = if dest_needs_borrow(destination) {
        trace!("creating temp for return destination");
        let dest = Rvalue::Ref(
            tcx.lifetimes.re_erased,
            BorrowKind::Mut { kind: MutBorrowKind::Default },
            destination,
        );
        let dest_ty = dest.ty(caller_body, tcx);
        let temp = Place::from(new_call_temp(caller_body, callsite, dest_ty, return_block));
        caller_body[callsite.block].statements.push(Statement::new(
            callsite.source_info,
            StatementKind::Assign(Box::new((temp, dest))),
        ));
        tcx.mk_place_deref(temp)
    } else {
        destination
    };

    // Always create a local to hold the destination, as `RETURN_PLACE` may appear
    // where a full `Place` is not allowed.
    let (remap_destination, destination_local) = if let Some(d) = dest.as_local() {
        (false, d)
    } else {
        (
            true,
            new_call_temp(caller_body, callsite, destination.ty(caller_body, tcx).ty, return_block),
        )
    };

    // Copy the arguments if needed.
    let args = make_call_args(inliner, args, callsite, caller_body, &callee_body, return_block);

    let mut integrator = Integrator {
        args: &args,
        new_locals: caller_body.local_decls.next_index()..,
        new_scopes: caller_body.source_scopes.next_index()..,
        new_blocks: caller_body.basic_blocks.next_index()..,
        destination: destination_local,
        callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
        callsite,
        cleanup_block: unwind,
        in_cleanup_block: false,
        return_block,
        tcx,
        always_live_locals: UsedInStmtLocals::new(&callee_body).locals,
    };

    // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
    // (or existing ones, in a few special cases) in the caller.
    integrator.visit_body(&mut callee_body);

    // If there are any locals without storage markers, give them storage only for the
    // duration of the call.
    for local in callee_body.vars_and_temps_iter() {
        if integrator.always_live_locals.contains(local) {
            let new_local = integrator.map_local(local);
            caller_body[callsite.block]
                .statements
                .push(Statement::new(callsite.source_info, StatementKind::StorageLive(new_local)));
        }
    }
    if let Some(block) = return_block {
        // To avoid repeated O(n) insert, push any new statements to the end and rotate
        // the slice once.
        let mut n = 0;
        if remap_destination {
            caller_body[block].statements.push(Statement::new(
                callsite.source_info,
                StatementKind::Assign(Box::new((
                    dest,
                    Rvalue::Use(Operand::Move(destination_local.into())),
                ))),
            ));
            n += 1;
        }
        for local in callee_body.vars_and_temps_iter().rev() {
            if integrator.always_live_locals.contains(local) {
                let new_local = integrator.map_local(local);
                caller_body[block].statements.push(Statement::new(
                    callsite.source_info,
                    StatementKind::StorageDead(new_local),
                ));
                n += 1;
            }
        }
        caller_body[block].statements.rotate_right(n);
    }

    // Insert all of the (mapped) parts of the callee body into the caller.
    caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
    caller_body.source_scopes.append(&mut callee_body.source_scopes);

    // Only "full" debuginfo promises any variable-level information.
    if tcx
        .sess
        .opts
        .unstable_opts
        .inline_mir_preserve_debug
        .unwrap_or(tcx.sess.opts.debuginfo == DebugInfo::Full)
    {
        // -Zinline-mir-preserve-debug is enabled when building the standard library, so that
        // people working on rust can build with or without debuginfo while
        // still getting consistent results from the mir-opt tests.
        caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
    } else {
        for bb in callee_body.basic_blocks_mut() {
            bb.drop_debuginfo();
        }
    }
    caller_body.basic_blocks_mut().append(callee_body.basic_blocks_mut());

    caller_body[callsite.block].terminator = Some(Terminator {
        source_info: callsite.source_info,
        kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
    });

    // Copy required constants from the callee_body into the caller_body. Although we are only
    // pushing unevaluated consts to `required_consts`, here they may have been evaluated
    // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again.
    caller_body.required_consts.as_mut().unwrap().extend(
        callee_body.required_consts().into_iter().filter(|ct| ct.const_.is_required_const()),
    );
    // Now that we incorporated the callee's `required_consts`, we can remove the callee from
    // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does
    // some extra work here to save the monomorphization collector work later. It helps a lot,
    // since monomorphization can avoid a lot of work when the "mentioned items" are similar to
    // the actually used items. By doing this we can entirely avoid visiting the callee!
    // We need to reconstruct the `required_item` for the callee so that we can find and
    // remove it.
    let callee_item = MentionedItem::Fn(func.ty(caller_body, tcx));
    let caller_mentioned_items = caller_body.mentioned_items.as_mut().unwrap();
    if let Some(idx) = caller_mentioned_items.iter().position(|item| item.node == callee_item) {
        // We found the callee, so remove it and add its items instead.
        caller_mentioned_items.remove(idx);
        caller_mentioned_items.extend(callee_body.mentioned_items());
    } else {
        // If we can't find the callee, there's no point in adding its items. Probably it
        // already got removed by being inlined elsewhere in the same function, so we already
        // took its items.
    }
}

fn make_call_args<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    args: Box<[Spanned<Operand<'tcx>>]>,
    callsite: &CallSite<'tcx>,
    caller_body: &mut Body<'tcx>,
    callee_body: &Body<'tcx>,
    return_block: Option<BasicBlock>,
) -> Box<[Local]> {
    let tcx = inliner.tcx();

    // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
    // The caller provides the arguments wrapped up in a tuple:
    //
    //     tuple_tmp = (a, b, c)
    //     Fn::call(closure_ref, tuple_tmp)
    //
    // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
    // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
    // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
    // a vector like
    //
    //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
    //
    // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
    // if we "spill" that into *another* temporary, so that we can map the argument
    // variable in the callee MIR directly to an argument variable on our side.
    // So we introduce temporaries like:
    //
    //     tmp0 = tuple_tmp.0
    //     tmp1 = tuple_tmp.1
    //     tmp2 = tuple_tmp.2
    //
    // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
    if callsite.fn_sig.abi() == ExternAbi::RustCall && callee_body.spread_arg.is_none() {
        // FIXME(edition_2024): switch back to a normal method call.
        let mut args = <_>::into_iter(args);
        let self_ = create_temp_if_necessary(
            inliner,
            args.next().unwrap().node,
            callsite,
            caller_body,
            return_block,
        );
        let tuple = create_temp_if_necessary(
            inliner,
            args.next().unwrap().node,
            callsite,
            caller_body,
            return_block,
        );
        assert!(args.next().is_none());

        let tuple = Place::from(tuple);
        let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
            bug!("Closure arguments are not passed as a tuple");
        };

        // The `closure_ref` in our example above.
        let closure_ref_arg = iter::once(self_);

        // The `tmp0`, `tmp1`, and `tmp2` in our example above.
        let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
            // This is e.g., `tuple_tmp.0` in our example above.
            let tuple_field = Operand::Move(tcx.mk_place_field(tuple, FieldIdx::new(i), ty));

            // Spill to a local to make e.g., `tmp0`.
            create_temp_if_necessary(inliner, tuple_field, callsite, caller_body, return_block)
        });

        closure_ref_arg.chain(tuple_tmp_args).collect()
    } else {
        args.into_iter()
            .map(|a| create_temp_if_necessary(inliner, a.node, callsite, caller_body, return_block))
            .collect()
    }
}

/// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh temporary `T` and an
/// instruction `T = arg`, and returns `T`.
fn create_temp_if_necessary<'tcx, I: Inliner<'tcx>>(
    inliner: &I,
    arg: Operand<'tcx>,
    callsite: &CallSite<'tcx>,
    caller_body: &mut Body<'tcx>,
    return_block: Option<BasicBlock>,
) -> Local {
    // Reuse the operand if it is a moved temporary.
    if let Operand::Move(place) = &arg
        && let Some(local) = place.as_local()
        && caller_body.local_kind(local) == LocalKind::Temp
    {
        return local;
    }

    // Otherwise, create a temporary for the argument.
    trace!("creating temp for argument {:?}", arg);
    let arg_ty = arg.ty(caller_body, inliner.tcx());
    let local = new_call_temp(caller_body, callsite, arg_ty, return_block);
    caller_body[callsite.block].statements.push(Statement::new(
        callsite.source_info,
        StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
    ));
    local
}

/// Introduces a new temporary into the caller body that is live for the duration of the call.
fn new_call_temp<'tcx>(
    caller_body: &mut Body<'tcx>,
    callsite: &CallSite<'tcx>,
    ty: Ty<'tcx>,
    return_block: Option<BasicBlock>,
) -> Local {
    let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

    caller_body[callsite.block]
        .statements
        .push(Statement::new(callsite.source_info, StatementKind::StorageLive(local)));

    if let Some(block) = return_block {
        caller_body[block]
            .statements
            .insert(0, Statement::new(callsite.source_info, StatementKind::StorageDead(local)));
    }

    local
}

/**
 * Integrator.
 *
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals, and other control-flow data.
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Local,
    callsite_scope: SourceScopeData<'tcx>,
    callsite: &'a CallSite<'tcx>,
    cleanup_block: UnwindAction,
    in_cleanup_block: bool,
    return_block: Option<BasicBlock>,
    tcx: TyCtxt<'tcx>,
    always_live_locals: DenseBitSet<Local>,
}

impl Integrator<'_, '_> {
    fn map_local(&self, local: Local) -> Local {
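        // Callee locals are laid out as `[RETURN_PLACE, arguments.., vars-and-temps..]`:
        // the return place maps to the destination local, arguments map to the caller-side
        // argument temporaries, and everything else shifts into the fresh `new_locals` range.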
        let new = if local == RETURN_PLACE {
            self.destination
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                self.new_locals.start + (idx - self.args.len())
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = self.new_scopes.start + scope.index();
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = self.new_blocks.start + block.index();
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }

    fn map_unwind(&self, unwind: UnwindAction) -> UnwindAction {
        if self.in_cleanup_block {
            match unwind {
                UnwindAction::Cleanup(_) | UnwindAction::Continue => {
                    bug!("cleanup on cleanup block");
                }
                UnwindAction::Unreachable | UnwindAction::Terminate(_) => return unwind,
            }
        }

        match unwind {
            UnwindAction::Unreachable | UnwindAction::Terminate(_) => unwind,
            UnwindAction::Cleanup(target) => UnwindAction::Cleanup(self.map_block(target)),
            // Add an unwind edge to the original call's cleanup block
            UnwindAction::Continue => self.cleanup_block,
        }
    }
}

impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
        self.super_source_scope_data(scope_data);
        if scope_data.parent_scope.is_none() {
            // Attach the outermost callee scope as a child of the callsite
            // scope, via the `parent_scope` and `inlined_parent_scope` chains.
            scope_data.parent_scope = Some(self.callsite.source_info.scope);
            assert_eq!(scope_data.inlined_parent_scope, None);
            scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
                Some(self.callsite.source_info.scope)
            } else {
                self.callsite_scope.inlined_parent_scope
            };

            // Mark the outermost callee scope as an inlined one.
            assert_eq!(scope_data.inlined, None);
            scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
        } else if scope_data.inlined_parent_scope.is_none() {
            // Make it easy to find the scope with `inlined` set above.
            scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        } else {
            self.visit_source_info(&mut terminator.source_info);
        }

        match terminator.kind {
            TerminatorKind::CoroutineDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::TailCall { .. } => {
                // `check_callee_mir_body` forbids tail calls
                unreachable!()
            }
            TerminatorKind::Call { ref mut target, ref mut unwind, .. } => {
                if let Some(ref mut tgt) = *target {
                    *tgt = self.map_block(*tgt);
                }
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Assert { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.return_block {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                }
            }
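            // A `resume` in the callee means "continue unwinding in the caller", so
            // redirect it according to the original call's unwind action.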
            TerminatorKind::UnwindResume => {
                terminator.kind = match self.cleanup_block {
                    UnwindAction::Cleanup(tgt) => TerminatorKind::Goto { target: tgt },
                    UnwindAction::Continue => TerminatorKind::UnwindResume,
                    UnwindAction::Unreachable => TerminatorKind::Unreachable,
                    UnwindAction::Terminate(reason) => TerminatorKind::UnwindTerminate(reason),
                };
            }
            TerminatorKind::UnwindTerminate(_) => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut targets, ref mut unwind, .. } => {
                for tgt in targets.iter_mut() {
                    *tgt = self.map_block(*tgt);
                }
                *unwind = self.map_unwind(*unwind);
            }
        }
    }
}

#[instrument(skip(tcx), level = "debug")]
fn try_instance_mir<'tcx>(
    tcx: TyCtxt<'tcx>,
    instance: InstanceKind<'tcx>,
) -> Result<&'tcx Body<'tcx>, &'static str> {
    if let ty::InstanceKind::DropGlue(_, Some(ty)) | ty::InstanceKind::AsyncDropGlueCtorShim(_, ty) =
        instance
        && let ty::Adt(def, args) = ty.kind()
    {
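        // The drop shim cannot be built for types whose fields still mention generic
        // params behind unresolved aliases, so bail out before requesting its MIR.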
        let fields = def.all_fields();
        for field in fields {
            let field_ty = field.ty(tcx, args);
            if field_ty.has_param() && field_ty.has_aliases() {
                return Err("cannot build drop shim for polymorphic type");
            }
        }
    }
    Ok(tcx.instance_mir(instance))
}

fn body_is_forwarder(body: &Body<'_>) -> bool {
    let TerminatorKind::Call { target, .. } = body.basic_blocks[START_BLOCK].terminator().kind
    else {
        return false;
    };
    if let Some(target) = target {
        let TerminatorKind::Return = body.basic_blocks[target].terminator().kind else {
            return false;
        };
    }

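    // Block budget: a non-polymorphic forwarder is just the call block plus a return
    // block; a polymorphic one may additionally need drop/unwind blocks, so allow a
    // couple more.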
    let max_blocks = if !body.is_polymorphic {
        2
    } else if target.is_none() {
        3
    } else {
        4
    };
    if body.basic_blocks.len() > max_blocks {
        return false;
    }

    body.basic_blocks.iter_enumerated().all(|(bb, bb_data)| {
        bb == START_BLOCK
            || matches!(
                bb_data.terminator().kind,
                TerminatorKind::Return
                    | TerminatorKind::Drop { .. }
                    | TerminatorKind::UnwindResume
                    | TerminatorKind::UnwindTerminate(_)
            )
    })
}