// compiler/rustc_ast_lowering/src/asm.rs

use std::collections::hash_map::Entry;

use rustc_ast::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_errors::msg;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_session::parse::feature_err;
use rustc_span::{Span, sym};
use rustc_target::asm;

use super::LoweringContext;
use super::errors::{
    AbiSpecifiedMultipleTimes, AttSyntaxOnlyX86, ClobberAbiNotSupported,
    InlineAsmUnsupportedTarget, InvalidAbiClobberAbi, InvalidAsmTemplateModifierConst,
    InvalidAsmTemplateModifierLabel, InvalidAsmTemplateModifierRegClass,
    InvalidAsmTemplateModifierRegClassSub, InvalidAsmTemplateModifierSym, InvalidRegister,
    InvalidRegisterClass, RegisterClassOnlyClobber, RegisterClassOnlyClobberStable,
    RegisterConflict,
};
use crate::{AllowReturnTypeNotation, ImplTraitContext, ImplTraitPosition, ParamMode};
22
23impl<'hir> LoweringContext<'_, 'hir> {
24    pub(crate) fn lower_inline_asm(
25        &mut self,
26        sp: Span,
27        asm: &InlineAsm,
28    ) -> &'hir hir::InlineAsm<'hir> {
29        // Rustdoc needs to support asm! from foreign architectures: don't try
30        // lowering the register constraints in this case.
31        let asm_arch =
32            if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
33        if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
34            self.dcx().emit_err(InlineAsmUnsupportedTarget { span: sp });
35        }
36        if let Some(asm_arch) = asm_arch {
37            // Inline assembly is currently only stable for these architectures.
38            // (See also compiletest's `has_asm_support`.)
39            let is_stable = #[allow(non_exhaustive_omitted_patterns)] match asm_arch {
    asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64 |
        asm::InlineAsmArch::Arm | asm::InlineAsmArch::AArch64 |
        asm::InlineAsmArch::Arm64EC | asm::InlineAsmArch::RiscV32 |
        asm::InlineAsmArch::RiscV64 | asm::InlineAsmArch::LoongArch32 |
        asm::InlineAsmArch::LoongArch64 | asm::InlineAsmArch::S390x |
        asm::InlineAsmArch::PowerPC | asm::InlineAsmArch::PowerPC64 => true,
    _ => false,
}matches!(
40                asm_arch,
41                asm::InlineAsmArch::X86
42                    | asm::InlineAsmArch::X86_64
43                    | asm::InlineAsmArch::Arm
44                    | asm::InlineAsmArch::AArch64
45                    | asm::InlineAsmArch::Arm64EC
46                    | asm::InlineAsmArch::RiscV32
47                    | asm::InlineAsmArch::RiscV64
48                    | asm::InlineAsmArch::LoongArch32
49                    | asm::InlineAsmArch::LoongArch64
50                    | asm::InlineAsmArch::S390x
51                    | asm::InlineAsmArch::PowerPC
52                    | asm::InlineAsmArch::PowerPC64
53            );
54            if !is_stable
55                && !self.tcx.features().asm_experimental_arch()
56                && sp
57                    .ctxt()
58                    .outer_expn_data()
59                    .allow_internal_unstable
60                    .filter(|features| features.contains(&sym::asm_experimental_arch))
61                    .is_none()
62            {
63                feature_err(
64                    &self.tcx.sess,
65                    sym::asm_experimental_arch,
66                    sp,
67                    rustc_errors::DiagMessage::Inline(std::borrow::Cow::Borrowed("inline assembly is not stable yet on this architecture"))msg!("inline assembly is not stable yet on this architecture"),
68                )
69                .emit();
70            }
71        }
72        let allow_experimental_reg = self.tcx.features().asm_experimental_reg();
73        if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
74            && !#[allow(non_exhaustive_omitted_patterns)] match asm_arch {
    Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64) => true,
    _ => false,
}matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
75            && !self.tcx.sess.opts.actually_rustdoc
76        {
77            self.dcx().emit_err(AttSyntaxOnlyX86 { span: sp });
78        }
79        if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind() {
80            feature_err(
81                &self.tcx.sess,
82                sym::asm_unwind,
83                sp,
84                rustc_errors::DiagMessage::Inline(std::borrow::Cow::Borrowed("the `may_unwind` option is unstable"))msg!("the `may_unwind` option is unstable"),
85            )
86            .emit();
87        }
88
89        let mut clobber_abis = FxIndexMap::default();
90        if let Some(asm_arch) = asm_arch {
91            for (abi_name, abi_span) in &asm.clobber_abis {
92                match asm::InlineAsmClobberAbi::parse(
93                    asm_arch,
94                    &self.tcx.sess.target,
95                    &self.tcx.sess.unstable_target_features,
96                    *abi_name,
97                ) {
98                    Ok(abi) => {
99                        // If the abi was already in the list, emit an error
100                        match clobber_abis.get(&abi) {
101                            Some((prev_name, prev_sp)) => {
102                                // Multiple different abi names may actually be the same ABI
103                                // If the specified ABIs are not the same name, alert the user that they resolve to the same ABI
104                                let source_map = self.tcx.sess.source_map();
105                                let equivalent = source_map.span_to_snippet(*prev_sp)
106                                    != source_map.span_to_snippet(*abi_span);
107
108                                self.dcx().emit_err(AbiSpecifiedMultipleTimes {
109                                    abi_span: *abi_span,
110                                    prev_name: *prev_name,
111                                    prev_span: *prev_sp,
112                                    equivalent,
113                                });
114                            }
115                            None => {
116                                clobber_abis.insert(abi, (*abi_name, *abi_span));
117                            }
118                        }
119                    }
120                    Err(&[]) => {
121                        self.dcx().emit_err(ClobberAbiNotSupported { abi_span: *abi_span });
122                    }
123                    Err(supported_abis) => {
124                        self.dcx().emit_err(InvalidAbiClobberAbi {
125                            abi_span: *abi_span,
126                            supported_abis: supported_abis.to_vec().into(),
127                        });
128                    }
129                }
130            }
131        }
132
133        // Lower operands to HIR. We use dummy register classes if an error
134        // occurs during lowering because we still need to be able to produce a
135        // valid HIR.
136        let sess = self.tcx.sess;
137        let mut operands: Vec<_> = asm
138            .operands
139            .iter()
140            .map(|(op, op_sp)| {
141                let lower_reg = |&reg: &_| match reg {
142                    InlineAsmRegOrRegClass::Reg(reg) => {
143                        asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
144                            asm::InlineAsmReg::parse(asm_arch, reg).unwrap_or_else(|error| {
145                                self.dcx().emit_err(InvalidRegister {
146                                    op_span: *op_sp,
147                                    reg,
148                                    error,
149                                });
150                                asm::InlineAsmReg::Err
151                            })
152                        } else {
153                            asm::InlineAsmReg::Err
154                        })
155                    }
156                    InlineAsmRegOrRegClass::RegClass(reg_class) => {
157                        asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
158                            asm::InlineAsmRegClass::parse(asm_arch, reg_class).unwrap_or_else(
159                                |supported_register_classes| {
160                                    self.dcx().emit_err(InvalidRegisterClass {
161                                        op_span: *op_sp,
162                                        reg_class,
163                                        supported_register_classes: supported_register_classes
164                                            .to_vec()
165                                            .into(),
166                                    });
167                                    asm::InlineAsmRegClass::Err
168                                },
169                            )
170                        } else {
171                            asm::InlineAsmRegClass::Err
172                        })
173                    }
174                };
175
176                let op = match op {
177                    InlineAsmOperand::In { reg, expr } => hir::InlineAsmOperand::In {
178                        reg: lower_reg(reg),
179                        expr: self.lower_expr(expr),
180                    },
181                    InlineAsmOperand::Out { reg, late, expr } => hir::InlineAsmOperand::Out {
182                        reg: lower_reg(reg),
183                        late: *late,
184                        expr: expr.as_ref().map(|expr| self.lower_expr(expr)),
185                    },
186                    InlineAsmOperand::InOut { reg, late, expr } => hir::InlineAsmOperand::InOut {
187                        reg: lower_reg(reg),
188                        late: *late,
189                        expr: self.lower_expr(expr),
190                    },
191                    InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
192                        hir::InlineAsmOperand::SplitInOut {
193                            reg: lower_reg(reg),
194                            late: *late,
195                            in_expr: self.lower_expr(in_expr),
196                            out_expr: out_expr.as_ref().map(|expr| self.lower_expr(expr)),
197                        }
198                    }
199                    InlineAsmOperand::Const { anon_const } => hir::InlineAsmOperand::Const {
200                        anon_const: self.lower_const_block(anon_const),
201                    },
202                    InlineAsmOperand::Sym { sym } => {
203                        let static_def_id = self
204                            .get_partial_res(sym.id)
205                            .and_then(|res| res.full_res())
206                            .and_then(|res| match res {
207                                Res::Def(DefKind::Static { .. }, def_id) => Some(def_id),
208                                _ => None,
209                            });
210
211                        if let Some(def_id) = static_def_id {
212                            let path = self.lower_qpath(
213                                sym.id,
214                                &sym.qself,
215                                &sym.path,
216                                ParamMode::Optional,
217                                AllowReturnTypeNotation::No,
218                                ImplTraitContext::Disallowed(ImplTraitPosition::Path),
219                                None,
220                            );
221                            hir::InlineAsmOperand::SymStatic { path, def_id }
222                        } else {
223                            // Replace the InlineAsmSym AST node with an
224                            // Expr using the name node id.
225                            let expr = Expr {
226                                id: sym.id,
227                                kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()),
228                                span: *op_sp,
229                                attrs: AttrVec::new(),
230                                tokens: None,
231                            };
232
233                            hir::InlineAsmOperand::SymFn { expr: self.lower_expr(&expr) }
234                        }
235                    }
236                    InlineAsmOperand::Label { block } => {
237                        hir::InlineAsmOperand::Label { block: self.lower_block(block, false) }
238                    }
239                };
240                (op, self.lower_span(*op_sp))
241            })
242            .collect();
243
244        // Validate template modifiers against the register classes for the operands
245        for p in &asm.template {
246            if let InlineAsmTemplatePiece::Placeholder {
247                operand_idx,
248                modifier: Some(modifier),
249                span: placeholder_span,
250            } = *p
251            {
252                let op_sp = asm.operands[operand_idx].1;
253                match &operands[operand_idx].0 {
254                    hir::InlineAsmOperand::In { reg, .. }
255                    | hir::InlineAsmOperand::Out { reg, .. }
256                    | hir::InlineAsmOperand::InOut { reg, .. }
257                    | hir::InlineAsmOperand::SplitInOut { reg, .. } => {
258                        let class = reg.reg_class();
259                        if class == asm::InlineAsmRegClass::Err {
260                            continue;
261                        }
262                        let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
263                        if !valid_modifiers.contains(&modifier) {
264                            let sub = if valid_modifiers.is_empty() {
265                                InvalidAsmTemplateModifierRegClassSub::DoesNotSupportModifier {
266                                    class_name: class.name(),
267                                }
268                            } else {
269                                InvalidAsmTemplateModifierRegClassSub::SupportModifier {
270                                    class_name: class.name(),
271                                    modifiers: valid_modifiers.to_vec().into(),
272                                }
273                            };
274                            self.dcx().emit_err(InvalidAsmTemplateModifierRegClass {
275                                placeholder_span,
276                                op_span: op_sp,
277                                modifier: modifier.to_string(),
278                                sub,
279                            });
280                        }
281                    }
282                    hir::InlineAsmOperand::Const { .. } => {
283                        self.dcx().emit_err(InvalidAsmTemplateModifierConst {
284                            placeholder_span,
285                            op_span: op_sp,
286                        });
287                    }
288                    hir::InlineAsmOperand::SymFn { .. }
289                    | hir::InlineAsmOperand::SymStatic { .. } => {
290                        self.dcx().emit_err(InvalidAsmTemplateModifierSym {
291                            placeholder_span,
292                            op_span: op_sp,
293                        });
294                    }
295                    hir::InlineAsmOperand::Label { .. } => {
296                        self.dcx().emit_err(InvalidAsmTemplateModifierLabel {
297                            placeholder_span,
298                            op_span: op_sp,
299                        });
300                    }
301                }
302            }
303        }
304
305        let mut used_input_regs = FxHashMap::default();
306        let mut used_output_regs = FxHashMap::default();
307
308        for (idx, &(ref op, op_sp)) in operands.iter().enumerate() {
309            if let Some(reg) = op.reg() {
310                let reg_class = reg.reg_class();
311                if reg_class == asm::InlineAsmRegClass::Err {
312                    continue;
313                }
314
315                // Some register classes can only be used as clobbers. This
316                // means that we disallow passing a value in/out of the asm and
317                // require that the operand name an explicit register, not a
318                // register class.
319                if reg_class.is_clobber_only(asm_arch.unwrap(), allow_experimental_reg)
320                    && !op.is_clobber()
321                {
322                    if allow_experimental_reg || reg_class.is_clobber_only(asm_arch.unwrap(), true)
323                    {
324                        // always clobber-only
325                        self.dcx().emit_err(RegisterClassOnlyClobber {
326                            op_span: op_sp,
327                            reg_class_name: reg_class.name(),
328                        });
329                    } else {
330                        // clobber-only in stable
331                        self.tcx
332                            .sess
333                            .create_feature_err(
334                                RegisterClassOnlyClobberStable {
335                                    op_span: op_sp,
336                                    reg_class_name: reg_class.name(),
337                                },
338                                sym::asm_experimental_reg,
339                            )
340                            .emit();
341                    }
342                    continue;
343                }
344
345                // Check for conflicts between explicit register operands.
346                if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg {
347                    let (input, output) = match op {
348                        hir::InlineAsmOperand::In { .. } => (true, false),
349
350                        // Late output do not conflict with inputs, but normal outputs do
351                        hir::InlineAsmOperand::Out { late, .. } => (!late, true),
352
353                        hir::InlineAsmOperand::InOut { .. }
354                        | hir::InlineAsmOperand::SplitInOut { .. } => (true, true),
355
356                        hir::InlineAsmOperand::Const { .. }
357                        | hir::InlineAsmOperand::SymFn { .. }
358                        | hir::InlineAsmOperand::SymStatic { .. }
359                        | hir::InlineAsmOperand::Label { .. } => {
360                            {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("{0:?} is not a register operand", op)));
};unreachable!("{op:?} is not a register operand");
361                        }
362                    };
363
364                    // Flag to output the error only once per operand
365                    let mut skip = false;
366
367                    let mut check = |used_regs: &mut FxHashMap<asm::InlineAsmReg, usize>,
368                                     input,
369                                     r: asm::InlineAsmReg| {
370                        match used_regs.entry(r) {
371                            Entry::Occupied(o) => {
372                                if skip {
373                                    return;
374                                }
375                                skip = true;
376
377                                let idx2 = *o.get();
378                                let (ref op2, op_sp2) = operands[idx2];
379
380                                let in_out = match (op, op2) {
381                                    (
382                                        hir::InlineAsmOperand::In { .. },
383                                        hir::InlineAsmOperand::Out { late, .. },
384                                    )
385                                    | (
386                                        hir::InlineAsmOperand::Out { late, .. },
387                                        hir::InlineAsmOperand::In { .. },
388                                    ) => {
389                                        if !!*late { ::core::panicking::panic("assertion failed: !*late") };assert!(!*late);
390                                        let out_op_sp = if input { op_sp2 } else { op_sp };
391                                        Some(out_op_sp)
392                                    }
393                                    _ => None,
394                                };
395                                let reg_str = |idx| -> &str {
396                                    // HIR asm doesn't preserve the original alias string of the explicit register,
397                                    // so we have to retrieve it from AST
398                                    let (op, _): &(InlineAsmOperand, Span) = &asm.operands[idx];
399                                    if let Some(ast::InlineAsmRegOrRegClass::Reg(reg_sym)) =
400                                        op.reg()
401                                    {
402                                        reg_sym.as_str()
403                                    } else {
404                                        {
    ::core::panicking::panic_fmt(format_args!("internal error: entered unreachable code: {0}",
            format_args!("{0:?} is not a register operand", op)));
};unreachable!("{op:?} is not a register operand");
405                                    }
406                                };
407
408                                self.dcx().emit_err(RegisterConflict {
409                                    op_span1: op_sp,
410                                    op_span2: op_sp2,
411                                    reg1_name: reg_str(idx),
412                                    reg2_name: reg_str(idx2),
413                                    in_out,
414                                });
415                            }
416                            Entry::Vacant(v) => {
417                                if r == reg {
418                                    v.insert(idx);
419                                }
420                            }
421                        }
422                    };
423                    let mut overlapping_with = ::alloc::vec::Vec::new()vec![];
424                    reg.overlapping_regs(|r| {
425                        overlapping_with.push(r);
426                    });
427                    for r in overlapping_with {
428                        if input {
429                            check(&mut used_input_regs, true, r);
430                        }
431                        if output {
432                            check(&mut used_output_regs, false, r);
433                        }
434                    }
435                }
436            }
437        }
438
439        // If a clobber_abi is specified, add the necessary clobbers to the
440        // operands list.
441        let mut clobbered = FxHashSet::default();
442        for (abi, (_, abi_span)) in clobber_abis {
443            for &clobber in abi.clobbered_regs() {
444                // Don't emit a clobber for a register already clobbered
445                if clobbered.contains(&clobber) {
446                    continue;
447                }
448
449                let mut overlapping_with = ::alloc::vec::Vec::new()vec![];
450                clobber.overlapping_regs(|reg| {
451                    overlapping_with.push(reg);
452                });
453                let output_used =
454                    overlapping_with.iter().any(|reg| used_output_regs.contains_key(&reg));
455
456                if !output_used {
457                    operands.push((
458                        hir::InlineAsmOperand::Out {
459                            reg: asm::InlineAsmRegOrRegClass::Reg(clobber),
460                            late: true,
461                            expr: None,
462                        },
463                        self.lower_span(abi_span),
464                    ));
465                    clobbered.insert(clobber);
466                }
467            }
468        }
469
470        // Feature gate checking for `asm_goto_with_outputs`.
471        if let Some((_, op_sp)) =
472            operands.iter().find(|(op, _)| #[allow(non_exhaustive_omitted_patterns)] match op {
    hir::InlineAsmOperand::Label { .. } => true,
    _ => false,
}matches!(op, hir::InlineAsmOperand::Label { .. }))
473        {
474            // Check if an output operand is used.
475            let output_operand_used = operands.iter().any(|(op, _)| {
476                #[allow(non_exhaustive_omitted_patterns)] match op {
    hir::InlineAsmOperand::Out { expr: Some(_), .. } |
        hir::InlineAsmOperand::InOut { .. } |
        hir::InlineAsmOperand::SplitInOut { out_expr: Some(_), .. } => true,
    _ => false,
}matches!(
477                    op,
478                    hir::InlineAsmOperand::Out { expr: Some(_), .. }
479                        | hir::InlineAsmOperand::InOut { .. }
480                        | hir::InlineAsmOperand::SplitInOut { out_expr: Some(_), .. }
481                )
482            });
483            if output_operand_used && !self.tcx.features().asm_goto_with_outputs() {
484                feature_err(
485                    sess,
486                    sym::asm_goto_with_outputs,
487                    *op_sp,
488                    rustc_errors::DiagMessage::Inline(std::borrow::Cow::Borrowed("using both label and output operands for inline assembly is unstable"))msg!("using both label and output operands for inline assembly is unstable"),
489                )
490                .emit();
491            }
492        }
493
494        let operands = self.arena.alloc_from_iter(operands);
495        let template = self.arena.alloc_from_iter(asm.template.iter().cloned());
496        let template_strs = self.arena.alloc_from_iter(
497            asm.template_strs
498                .iter()
499                .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))),
500        );
501        let line_spans =
502            self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span)));
503        let hir_asm = hir::InlineAsm {
504            asm_macro: asm.asm_macro,
505            template,
506            template_strs,
507            operands,
508            options: asm.options,
509            line_spans,
510        };
511        self.arena.alloc(hir_asm)
512    }
513}