rustc_codegen_ssa/back/write.rs

use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{assert_matches, fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::find_attr;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_macros::{Decodable, Encodable};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes,
    SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use crate::back::link::{self, ensure_removed};
use crate::back::lto::{self, SerializedModule, check_lto_allowed};
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CompiledModule, CompiledModules, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed llvm bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of llvm bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}

/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Encodable, Decodable)]
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) || sess.target.requires_lto {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}
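
// Illustrative sketch (hypothetical helper, not part of the compiler):
// `embed_bitcode()` is strictly stronger than `bitcode_needed()`. Bitcode can
// be needed without being embedded, e.g. with `--emit=llvm-bc` alone `emit_bc`
// is true but the object file carries no bitcode section.
#[allow(dead_code)]
fn _bitcode_flags_example(config: &ModuleConfig) -> bool {
    // If we embed bitcode in the object, we certainly have to produce it,
    // so this implication always holds.
    !config.embed_bitcode() || config.bitcode_needed()
}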

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting `OutputFilenames` in target options
    /// so that LLVM can emit the CodeView `S_OBJNAME` record in PDB files.
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            DiagCtxtHandle<'_>,
            TargetMachineFactoryConfig,
        ) -> <B as WriteBackendMethods>::TargetMachine
        + Send
        + Sync,
>;

/// Additional resources used by `optimize_and_codegen` (not module specific).
#[derive(Clone, Encodable, Decodable)]
pub struct CodegenContext {
    // Resources needed when running LTO
    pub lto: Lto,
    pub use_linker_plugin_lto: bool,
    pub dylib_lto: bool,
    pub prefer_dynamic: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub opt_level: OptLevel,
    pub backend_features: Vec<String>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or `None` if we are not
    /// compiling incrementally.
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

fn generate_thin_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    dcx: DiagCtxtHandle<'_>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<ThinLtoInput<B>>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        prof,
        dcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}

enum MaybeLtoModules<B: WriteBackendMethods> {
    NoLto(CompiledModules),
    FatLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_fat_lto: Vec<FatLtoInput<B>>,
    },
    ThinLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_thin_lto: Vec<ThinLtoInput<B>>,
    },
}

fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}
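
// Illustrative sketch (hypothetical helper, not used by rustc): pre-LTO
// bitcode is only written when incremental compilation is combined with some
// form of LTO, so a later incremental build can re-run LTO from the cached
// per-CGU bitcode instead of re-codegenning the module.
#[allow(dead_code)]
fn _pre_lto_bitcode_example(sess: &Session) -> bool {
    // Equivalent formulation of the check above, for illustration only.
    sess.opts.incremental.is_some() && !matches!(sess.lto(), Lto::No)
}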

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();

    let no_builtins = find_attr!(tcx, crate, NoBuiltins);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

pub fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's easy to
            //    copy `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                // (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.rcgu.bc
    //  - #crate#.#module-name#.rcgu.o
    //  - #crate#.o (linked from crate.##.rcgu.o)
    //  - #crate#.bc (copied from crate.##.rcgu.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.rcgu.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.rcgu.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.rcgu.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.rcgu.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.rcgu.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU the file was renamed to drop the
                    // CGU-specific suffix, so regenerate the path the same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin-LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
    assert_eq!(short.len(), 3);
    let name = if let Some(index) = name.find("-cgu.") {
        &name[index + 1..] // +1 skips the leading '-'.
    } else {
        name
    };
    format!("{short} {name}")
}

// Windows has no thread name length limit, so use more descriptive names.
#[cfg(windows)]
fn desc(_short: &str, long: &str, name: &str) -> String {
    format!("{long} {name}")
}
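
// Illustrative only (hypothetical test, not part of the compiler): for a
// non-incremental CGU name, `desc` keeps just the distinguishing `cgu.N`
// suffix, so the whole thread name fits within the 15-byte *nix limit.
#[cfg(all(test, not(windows)))]
mod desc_example {
    #[test]
    fn keeps_cgu_suffix() {
        // "opt cgu.0" is 9 bytes, comfortably under the limit.
        assert_eq!(super::desc("opt", "optimize module", "regex.f10ba03eb5ec7975-cgu.0"), "opt cgu.0");
    }
}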

impl<B: WriteBackendMethods> WorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
        }
    }
}

impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                desc("cpy", "copy LTO artifacts for", &m.name)
            }
            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
        }
    }
}

/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
    /// The backend has finished compiling a CGU, nothing more required.
    Finished(CompiledModule),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// fat LTO.
    NeedsFatLto(FatLtoInput<B>),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// thin LTO.
    NeedsThinLto(String, B::ModuleBuffer),
}

pub enum FatLtoInput<B: WriteBackendMethods> {
    Serialized { name: String, bitcode_path: PathBuf },
    InMemory(ModuleCodegen<B::Module>),
}

pub enum ThinLtoInput<B: WriteBackendMethods> {
    Red { name: String, buffer: SerializedModule<B::ModuleBuffer> },
    Green { wp: WorkProduct, bitcode_path: PathBuf },
}

/// Actual LTO type we end up choosing based on multiple factors.
pub(crate) enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

791pub(crate) fn compute_per_cgu_lto_type(
792    sess_lto: &Lto,
793    linker_does_lto: bool,
794    sess_crate_types: &[CrateType],
795) -> ComputedLtoType {
796    // If the linker does LTO, we don't have to do it. Note that we
797    // keep doing full LTO, if it is requested, so as not to break the
798    // assumption that the output will be a single module.
799
800    // We ignore a request for full crate graph LTO if the crate type
801    // is only an rlib, as there is no full crate graph to process;
802    // that'll happen later.
803    //
804    // This use case currently comes up primarily for targets that
805    // require LTO so the request for LTO is always unconditionally
806    // passed down to the backend, but we don't actually want to do
807    // anything about it yet until we've got a final product.
808    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
809
810    match sess_lto {
811        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
812        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
813        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
814        _ => ComputedLtoType::No,
815    }
816}
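// Illustrative example (not part of the source): a `-Clto=thin` request is
// deferred while building only an rlib, but honored for a final artifact:
//
//     assert!(matches!(
//         compute_per_cgu_lto_type(&Lto::Thin, false, &[CrateType::Rlib]),
//         ComputedLtoType::No
//     ));
//     assert!(matches!(
//         compute_per_cgu_lto_type(&Lto::Thin, false, &[CrateType::Executable]),
//         ComputedLtoType::Thin
//     ));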
817
818fn execute_optimize_work_item<B: WriteBackendMethods>(
819    cgcx: &CodegenContext,
820    prof: &SelfProfilerRef,
821    shared_emitter: SharedEmitter,
822    mut module: ModuleCodegen<B::Module>,
823) -> WorkItemResult<B> {
824    let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
825
826    B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config);
827
828    // After we've done the initial round of optimizations we need to
829    // decide whether to synchronously codegen this module or ship it
830    // back to the coordinator thread for further LTO processing (which
831    // has to wait for all the initial modules to be optimized).
832
833    let lto_type =
834        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
835
836    // If we're doing some form of incremental LTO then we need to be sure to
837    // save our module to disk first.
838    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
839        let filename = pre_lto_bitcode_filename(&module.name);
840        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
841    } else {
842        None
843    };
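    // E.g. for CGU `regex.f10ba03eb5ec7975-cgu.0` this yields a path like
    // `<incr-comp-session-dir>/regex.f10ba03eb5ec7975-cgu.0.pre-lto.bc`,
    // assuming `pre_lto_bitcode_filename` appends `PRE_LTO_BC_EXT` (defined
    // near the top of this file) as the extension.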
844
845    match lto_type {
846        ComputedLtoType::No => {
847            let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config);
848            WorkItemResult::Finished(module)
849        }
850        ComputedLtoType::Thin => {
851            let thin_buffer = B::serialize_module(module.module_llvm, true);
852            if let Some(path) = bitcode {
853                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
854                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
855                });
856            }
857            WorkItemResult::NeedsThinLto(module.name, thin_buffer)
858        }
859        ComputedLtoType::Fat => match bitcode {
860            Some(path) => {
861                let buffer = B::serialize_module(module.module_llvm, false);
862                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
863                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
864                });
865                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
866                    name: module.name,
867                    bitcode_path: path,
868                })
869            }
870            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
871        },
872    }
873}
874
875fn execute_copy_from_cache_work_item(
876    cgcx: &CodegenContext,
877    prof: &SelfProfilerRef,
878    shared_emitter: SharedEmitter,
879    module: CachedModuleCodegen,
880) -> CompiledModule {
881    let _timer =
882        prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
883
884    let dcx = DiagCtxt::new(Box::new(shared_emitter));
885    let dcx = dcx.handle();
886
887    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
888
889    let mut links_from_incr_cache = Vec::new();
890
891    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
892        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
893        debug!(
894            "copying preexisting module `{}` from {:?} to {}",
895            module.name,
896            source_file,
897            output_path.display()
898        );
899        match link_or_copy(&source_file, &output_path) {
900            Ok(_) => {
901                links_from_incr_cache.push(source_file);
902                Some(output_path)
903            }
904            Err(error) => {
905                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
906                None
907            }
908        }
909    };
910
911    let dwarf_object =
912        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
913            let dwarf_obj_out = cgcx
914                .output_filenames
915                .split_dwarf_path(
916                    cgcx.split_debuginfo,
917                    cgcx.split_dwarf_kind,
918                    &module.name,
919                    cgcx.invocation_temp.as_deref(),
920                )
921                .expect(
922                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
923                );
924            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
925        });
926
927    let mut load_from_incr_cache = |perform, output_type: OutputType| {
928        if perform {
929            let saved_file = module.source.saved_files.get(output_type.extension())?;
930            let output_path = cgcx.output_filenames.temp_path_for_cgu(
931                output_type,
932                &module.name,
933                cgcx.invocation_temp.as_deref(),
934            );
935            load_from_incr_comp_dir(output_path, &saved_file)
936        } else {
937            None
938        }
939    };
940
941    let module_config = &cgcx.module_config;
942    let should_emit_obj = module_config.emit_obj != EmitObj::None;
943    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
944    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
945    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
946    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
947    if should_emit_obj && object.is_none() {
948        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
949    }
950
951    CompiledModule {
952        links_from_incr_cache,
953        kind: ModuleKind::Regular,
954        name: module.name,
955        object,
956        dwarf_object,
957        bytecode,
958        assembly,
959        llvm_ir,
960    }
961}
962
963fn do_fat_lto<B: WriteBackendMethods>(
964    cgcx: &CodegenContext,
965    prof: &SelfProfilerRef,
966    shared_emitter: SharedEmitter,
967    tm_factory: TargetMachineFactoryFn<B>,
968    exported_symbols_for_lto: &[String],
969    each_linked_rlib_for_lto: &[PathBuf],
970    needs_fat_lto: Vec<FatLtoInput<B>>,
971) -> CompiledModule {
972    let _timer = prof.verbose_generic_activity("LLVM_fatlto");
973
974    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
975    let dcx = dcx.handle();
976
977    check_lto_allowed(&cgcx, dcx);
978
979    B::optimize_and_codegen_fat_lto(
980        cgcx,
981        prof,
982        &shared_emitter,
983        tm_factory,
984        exported_symbols_for_lto,
985        each_linked_rlib_for_lto,
986        needs_fat_lto,
987    )
988}
989
990fn do_thin_lto<B: WriteBackendMethods>(
991    cgcx: &CodegenContext,
992    prof: &SelfProfilerRef,
993    shared_emitter: SharedEmitter,
994    tm_factory: TargetMachineFactoryFn<B>,
995    exported_symbols_for_lto: Arc<Vec<String>>,
996    each_linked_rlib_for_lto: Vec<PathBuf>,
997    needs_thin_lto: Vec<ThinLtoInput<B>>,
998) -> Vec<CompiledModule> {
999    let _timer = prof.verbose_generic_activity("LLVM_thinlto");
1000
1001    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
1002    let dcx = dcx.handle();
1003
1004    check_lto_allowed(&cgcx, dcx);
1005
1006    let (coordinator_send, coordinator_receive) = channel();
1007
1008    // First up, convert our jobserver into a helper thread so we can use normal
1009    // mpsc channels to manage our messages and such.
1010    // Once we've requested tokens, they arrive as messages on
1011    // `coordinator_receive`, where they get managed in the main
1012    // loop below.
1013    let coordinator_send2 = coordinator_send.clone();
1014    let helper = jobserver::client()
1015        .into_helper_thread(move |token| {
1016            drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
1017        })
1018        .expect("failed to spawn helper thread");
1019
1020    let mut work_items = vec![];
1021
1022    // We have LTO work to do. Perform the serial work here of
1023    // figuring out what we're going to LTO and then push a
1024    // bunch of work items onto our queue to do LTO. This all
1025    // happens on the coordinator thread but it's very quick so
1026    // we don't worry about tokens.
1027    for (work, cost) in generate_thin_lto_work::<B>(
1028        cgcx,
1029        prof,
1030        dcx,
1031        &exported_symbols_for_lto,
1032        &each_linked_rlib_for_lto,
1033        needs_thin_lto,
1034    ) {
1035        let insertion_index =
1036            work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
1037        work_items.insert(insertion_index, (work, cost));
1038        if cgcx.parallel {
1039            helper.request_token();
1040        }
1041    }
1042
1043    let mut codegen_aborted = None;
1044
1045    // These are the Jobserver Tokens we currently hold. Does not include
1046    // the implicit Token the compiler process owns no matter what.
1047    let mut tokens = vec![];
1048
1049    // Number of tokens that are in use (including the implicit token).
1050    let mut used_token_count = 0;
1051
1052    let mut compiled_modules = vec![];
1053
1054    // Run the message loop while there's still anything that needs message
1055    // processing. Note that as soon as codegen is aborted we simply want to
1056    // wait for all existing work to finish, so many of the conditions here
1057    // only apply if codegen hasn't been aborted as they represent pending
1058    // work to be done.
1059    loop {
1060        if codegen_aborted.is_none() {
1061            if used_token_count == 0 && work_items.is_empty() {
1062                // All codegen work is done.
1063                break;
1064            }
1065
1066            // Spin up what work we can, only doing this while we've got available
1067            // parallelism slots and work left to spawn.
1068            while used_token_count < tokens.len() + 1
1069                && let Some((item, _)) = work_items.pop()
1070            {
1071                spawn_thin_lto_work(
1072                    &cgcx,
1073                    prof,
1074                    shared_emitter.clone(),
1075                    Arc::clone(&tm_factory),
1076                    coordinator_send.clone(),
1077                    item,
1078                );
1079                used_token_count += 1;
1080            }
1081        } else {
1082            // Don't queue up any more work if codegen was aborted, we're
1083            // just waiting for our existing children to finish.
1084            if used_token_count == 0 {
1085                break;
1086            }
1087        }
1088
1089        // Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
1090        tokens.truncate(used_token_count.saturating_sub(1));
1091
1092        match coordinator_receive.recv().unwrap() {
1093            // Save the token locally and the next turn of the loop will use
1094            // this to spawn a new unit of work, or it may get dropped
1095            // immediately if we have no more work to spawn.
1096            ThinLtoMessage::Token(token) => match token {
1097                Ok(token) => {
1098                    tokens.push(token);
1099                }
1100                Err(e) => {
1101                    let msg = &format!("failed to acquire jobserver token: {e}");
1102                    shared_emitter.fatal(msg);
1103                    codegen_aborted = Some(FatalError);
1104                }
1105            },
1106
1107            ThinLtoMessage::WorkItem { result } => {
1108                // If a thread exits successfully then we drop a token associated
1109                // with that worker and update our `used_token_count` count.
1110                // We may later re-acquire a token to continue running more work.
1111                // We may also not actually drop a token here if the worker was
1112                // running with an "ephemeral token".
1113                used_token_count -= 1;
1114
1115                match result {
1116                    Ok(compiled_module) => compiled_modules.push(compiled_module),
1117                    Err(Some(WorkerFatalError)) => {
1118                        // Like `CodegenAborted`, wait for remaining work to finish.
1119                        codegen_aborted = Some(FatalError);
1120                    }
1121                    Err(None) => {
1122                        // If the thread failed that means it panicked, so
1123                        // we abort immediately.
1124                        bug!("worker thread panicked");
1125                    }
1126                }
1127            }
1128        }
1129    }
1130
1131    if let Some(codegen_aborted) = codegen_aborted {
1132        codegen_aborted.raise();
1133    }
1134
1135    compiled_modules
1136}
1137
1138fn execute_thin_lto_work_item<B: WriteBackendMethods>(
1139    cgcx: &CodegenContext,
1140    prof: &SelfProfilerRef,
1141    shared_emitter: SharedEmitter,
1142    tm_factory: TargetMachineFactoryFn<B>,
1143    module: lto::ThinModule<B>,
1144) -> CompiledModule {
1145    let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
1146
1147    B::optimize_and_codegen_thin(cgcx, prof, &shared_emitter, tm_factory, module)
1148}
1149
1150/// Messages sent to the coordinator.
1151pub(crate) enum Message<B: WriteBackendMethods> {
1152    /// A jobserver token has become available. Sent from the jobserver helper
1153    /// thread.
1154    Token(io::Result<Acquired>),
1155
1156    /// The backend has finished processing a work item for a codegen unit.
1157    /// Sent from a backend worker thread.
1158    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },
1159
1160    /// The frontend has finished generating something (backend IR or a
1161    /// post-LTO artifact) for a codegen unit, and it should be passed to the
1162    /// backend. Sent from the main thread.
1163    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },
1164
1165    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
1166    /// Sent from the main thread.
1167    AddImportOnlyModule { bitcode_path: PathBuf, work_product: WorkProduct },
1168
1169    /// The frontend has finished generating everything for all codegen units.
1170    /// Sent from the main thread.
1171    CodegenComplete,
1172
1173    /// Some normal-ish compiler error occurred, and codegen should be wound
1174    /// down. Sent from the main thread.
1175    CodegenAborted,
1176}
1177
1178/// Messages sent to the thin-LTO coordinator loop.
1179pub(crate) enum ThinLtoMessage {
1180    /// A jobserver token has become available. Sent from the jobserver helper
1181    /// thread.
1182    Token(io::Result<Acquired>),
1183
1184    /// The backend has finished processing a work item for a codegen unit.
1185    /// Sent from a backend worker thread.
1186    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
1187}
1188
1189/// A message sent from the coordinator thread to the main thread telling it to
1190/// process another codegen unit.
1191pub struct CguMessage;
1192
1193// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
1194// can be used to send diagnostics from codegen threads to the main thread.
1195// It's missing the following fields from `rustc_errors::DiagInner`.
1196// - `span`: it doesn't impl `Send`.
1197// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
1198//   diagnostics.
1199// - `sort_span`: it doesn't impl `Send`.
1200// - `is_lint`: lints aren't relevant during codegen.
1201// - `emitted_at`: not used for codegen diagnostics.
1202struct Diagnostic {
1203    span: Vec<SpanData>,
1204    level: Level,
1205    messages: Vec<(DiagMessage, Style)>,
1206    code: Option<ErrCode>,
1207    children: Vec<Subdiagnostic>,
1208    args: DiagArgMap,
1209}
1210
1211// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
1212// missing the following fields from `rustc_errors::Subdiag`.
1213// - `span`: it doesn't impl `Send`.
1214struct Subdiagnostic {
1215    level: Level,
1216    messages: Vec<(DiagMessage, Style)>,
1217}
1218
1219#[derive(PartialEq, Clone, Copy, Debug)]
1220enum MainThreadState {
1221    /// Doing nothing.
1222    Idle,
1223
1224    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
1225    Codegenning,
1226
1227    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
1228    Lending,
1229}
1230
1231fn start_executing_work<B: ExtraBackendMethods>(
1232    backend: B,
1233    tcx: TyCtxt<'_>,
1234    crate_info: &CrateInfo,
1235    shared_emitter: SharedEmitter,
1236    codegen_worker_send: Sender<CguMessage>,
1237    coordinator_receive: Receiver<Message<B>>,
1238    regular_config: Arc<ModuleConfig>,
1239    allocator_config: Arc<ModuleConfig>,
1240    mut allocator_module: Option<ModuleCodegen<B::Module>>,
1241    coordinator_send: Sender<Message<B>>,
1242) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
1243    let sess = tcx.sess;
1244    let prof = sess.prof.clone();
1245
1246    let mut each_linked_rlib_for_lto = Vec::new();
1247    let mut each_linked_rlib_file_for_lto = Vec::new();
1248    if sess.lto() != Lto::No && sess.lto() != Lto::ThinLocal {
1249        drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
1250            if link::ignored_for_lto(sess, crate_info, cnum) {
1251                return;
1252            }
1253
1254            each_linked_rlib_for_lto.push(cnum);
1255            each_linked_rlib_file_for_lto.push(path.to_path_buf());
1256        }));
1257    }
1258
1259    // Compute the set of symbols we need to retain when doing LTO (if we need to)
1260    let exported_symbols_for_lto =
1261        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));
1262
1263    // First up, convert our jobserver into a helper thread so we can use normal
1264    // mpsc channels to manage our messages and such.
1265    // Once we've requested tokens, they arrive as messages on
1266    // `coordinator_receive`, where they get managed in the main
1267    // loop below.
1268    let coordinator_send2 = coordinator_send.clone();
1269    let helper = jobserver::client()
1270        .into_helper_thread(move |token| {
1271            drop(coordinator_send2.send(Message::Token::<B>(token)));
1272        })
1273        .expect("failed to spawn helper thread");
1274
1275    let opt_level = tcx.backend_optimization_level(());
1276    let backend_features = tcx.global_backend_features(()).clone();
1277    let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features);
1278
1279    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
1280        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
1281        match result {
1282            Ok(dir) => Some(dir),
1283            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
1284        }
1285    } else {
1286        None
1287    };
1288
1289    let cgcx = CodegenContext {
1290        crate_types: tcx.crate_types().to_vec(),
1291        lto: sess.lto(),
1292        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
1293        dylib_lto: sess.opts.unstable_opts.dylib_lto,
1294        prefer_dynamic: sess.opts.cg.prefer_dynamic,
1295        fewer_names: sess.fewer_names(),
1296        save_temps: sess.opts.cg.save_temps,
1297        time_trace: sess.opts.unstable_opts.llvm_time_trace,
1298        remark: sess.opts.cg.remark.clone(),
1299        remark_dir,
1300        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
1301        output_filenames: Arc::clone(tcx.output_filenames(())),
1302        module_config: regular_config,
1303        opt_level,
1304        backend_features,
1305        msvc_imps_needed: msvc_imps_needed(tcx),
1306        is_pe_coff: tcx.sess.target.is_like_windows,
1307        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
1308        target_arch: tcx.sess.target.arch.to_string(),
1309        target_is_like_darwin: tcx.sess.target.is_like_darwin,
1310        target_is_like_aix: tcx.sess.target.is_like_aix,
1311        target_is_like_gpu: tcx.sess.target.is_like_gpu,
1312        split_debuginfo: tcx.sess.split_debuginfo(),
1313        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
1314        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
1315        pointer_size: tcx.data_layout.pointer_size(),
1316        invocation_temp: sess.invocation_temp.clone(),
1317    };
1318
1319    // This is the "main loop" of parallel work happening for parallel codegen.
1320    // It's here that we manage parallelism, schedule work, and work with
1321    // messages coming from clients.
1322    //
1323    // There are a few environmental pre-conditions that shape how the system
1324    // is set up:
1325    //
1326    // - Error reporting can only happen on the main thread because that's the
1327    //   only place where we have access to the compiler `Session`.
1328    // - LLVM work can be done on any thread.
1329    // - Codegen can only happen on the main thread.
1330    // - Each thread doing substantial work must be in possession of a `Token`
1331    //   from the `Jobserver`.
1332    // - The compiler process always holds one `Token`. Any additional `Tokens`
1333    //   have to be requested from the `Jobserver`.
1334    //
1335    // Error Reporting
1336    // ===============
1337    // The error reporting restriction is handled separately from the rest: We
1338    // set up a `SharedEmitter` that holds an open channel to the main thread.
1339    // When an error occurs on any thread, the shared emitter will send the
1340    // error message to the receiver main thread (`SharedEmitterMain`). The
1341    // main thread will periodically query this error message queue and emit
1342    // any error messages it has received. It might even abort compilation if
1343    // it has received a fatal error. In this case we rely on all other threads
1344    // being torn down automatically with the main thread.
1345    // Since the main thread will often be busy doing codegen work, error
1346    // reporting will be somewhat delayed, since the message queue can only be
1347    // checked in between two work packages.
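    //
    // A minimal sketch of this pattern (hedged; the real `SharedEmitter`
    // carries structured `rustc_errors` data rather than strings):
    //
    //     let (diag_tx, diag_rx) = std::sync::mpsc::channel::<String>();
    //     // on a worker thread:
    //     let _ = diag_tx.send("error: something went wrong".to_owned());
    //     // on the main thread, between two work packages:
    //     while let Ok(msg) = diag_rx.try_recv() { eprintln!("{msg}"); }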
1348    //
1349    // Work Processing Infrastructure
1350    // ==============================
1351    // The work processing infrastructure knows three major actors:
1352    //
1353    // - the coordinator thread,
1354    // - the main thread, and
1355    // - LLVM worker threads
1356    //
1357    // The coordinator thread is running a message loop. It instructs the main
1358    // thread about what work to do when, and it will spawn off LLVM worker
1359    // threads as open LLVM WorkItems become available.
1360    //
1361    // The job of the main thread is to codegen CGUs into LLVM work packages
1362    // (since the main thread is the only thread that can do this). The main
1363    // thread will block until it receives a message from the coordinator, upon
1364    // which it will codegen one CGU, send it to the coordinator and block
1365    // again. This way the coordinator can control what the main thread is
1366    // doing.
1367    //
1368    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
1369    // available, it will spawn off a new LLVM worker thread and let it process
1370    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
1371    // it will just shut down, which also frees all resources associated with
1372    // the given LLVM module, and sends a message to the coordinator that the
1373    // WorkItem has been completed.
1374    //
1375    // Work Scheduling
1376    // ===============
1377    // The scheduler's goal is to minimize the time it takes to complete all
1378    // work there is, however, we also want to keep memory consumption low
1379    // if possible. These two goals are at odds with each other: If memory
1380    // consumption were not an issue, we could just let the main thread produce
1381    // LLVM WorkItems at full speed, assuring maximal utilization of
1382    // Tokens/LLVM worker threads. However, since codegen is usually faster
1383    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
1384    // WorkItem potentially holds on to a substantial amount of memory.
1385    //
1386    // So the actual goal is to always produce just enough LLVM WorkItems as
1387    // not to starve our LLVM worker threads. That means, once we have enough
1388    // WorkItems in our queue, we can block the main thread, so it does not
1389    // produce more until we need them.
1390    //
1391    // Doing LLVM Work on the Main Thread
1392    // ----------------------------------
1393    // Since the main thread owns the compiler process's implicit `Token`, it is
1394    // wasteful to keep it blocked without doing any work. Therefore, what we do
1395    // in this case is: We spawn off an additional LLVM worker thread that helps
1396    // reduce the queue. The work it is doing corresponds to the implicit
1397    // `Token`. The coordinator will mark the main thread as being busy with
1398    // LLVM work. (The actual work happens on another OS thread but we just care
1399    // about `Tokens`, not actual threads).
1400    //
1401    // When any LLVM worker thread finishes while the main thread is marked as
1402    // "busy with LLVM work", we can do a little switcheroo: We give the Token
1403    // of the just finished thread to the LLVM worker thread that is working on
1404    // behalf of the main thread's implicit Token, thus freeing up the main
1405    // thread again. The coordinator can then again decide what the main thread
1406    // should do. This allows the coordinator to make decisions at more points
1407    // in time.
1408    //
1409    // Striking a Balance between Throughput and Memory Consumption
1410    // ------------------------------------------------------------
1411    // Since our two goals, (1) use as many Tokens as possible and (2) keep
1412    // memory consumption as low as possible, are in conflict with each other,
1413    // we have to find a trade-off between them. Right now, the goal is to keep
1414    // all workers busy, which means that no worker should find the queue empty
1415    // when it is ready to start.
1416    // How do we achieve this? Good question :) We actually never know how
1417    // many `Tokens` are potentially available so it's hard to say how much to
1418    // fill up the queue before switching the main thread to LLVM work. Also we
1419    // currently don't have a means to estimate how long a running LLVM worker
1420    // will still be busy with its current WorkItem. However, we know the
1421    // maximal count of available Tokens that makes sense (=the number of CPU
1422    // cores), so we can take a conservative guess. The heuristic we use here
1423    // is implemented in the `queue_full_enough()` function.
1424    //
1425    // Some Background on Jobservers
1426    // -----------------------------
1427    // It's worth also touching on the management of parallelism here. We don't
1428    // want to just spawn a thread per work item because, while that's optimal
1429    // parallelism, it may overload a system with too many threads or violate our
1430    // configuration for the maximum amount of CPU to use for this process. To
1431    // manage this we use the `jobserver` crate.
1432    //
1433    // Job servers are an artifact of GNU make and are used to manage
1434    // parallelism between processes. A jobserver is basically a glorified
1435    // IPC semaphore. Whenever we want to run some work we acquire the semaphore,
1436    // and whenever we're done with that work we release the semaphore. In this
1437    // manner we can ensure that the maximum number of parallel workers is
1438    // capped at any one point in time.
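    //
    // A hedged sketch of that acquire/release protocol using the `jobserver`
    // crate directly (the real wiring goes through the helper thread and the
    // message loop instead):
    //
    //     // SAFETY: only sound if the file descriptors inherited from the
    //     // parent's jobserver are still open and not used elsewhere.
    //     let client = unsafe { jobserver::Client::from_env() }.expect("no jobserver");
    //     let token = client.acquire().expect("failed to acquire token"); // may block
    //     // ... do one unit of parallel work while holding `token` ...
    //     drop(token); // hands the slot back to the semaphore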
1439    //
1440    // LTO and the coordinator thread
1441    // ------------------------------
1442    //
1443    // The final job the coordinator thread is responsible for is managing LTO
1444    // and how that works. When LTO is requested what we'll do is collect all
1445    // optimized LLVM modules into a local vector on the coordinator. Once all
1446    // modules have been codegened and optimized we hand this to the `lto`
1447    // module for further optimization. The `lto` module will return back a list
1448    // of more modules to work on, which the coordinator will continue to spawn
1449    // work for.
1450    //
1451    // Each LLVM module is automatically sent back to the coordinator for LTO if
1452    // necessary. There are already optimizations in place to avoid sending work
1453    // back to the coordinator if LTO isn't requested.
1454    let f = move || {
1455        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };
1456
1457        // This is where we collect codegen units that have gone all the way
1458        // through codegen and LLVM.
1459        let mut compiled_modules = vec![];
1460        let mut needs_fat_lto = Vec::new();
1461        let mut needs_thin_lto = Vec::new();
1462        let mut lto_import_only_modules = Vec::new();
1463
1464        /// Possible state transitions:
1465        /// - Ongoing -> Completed
1466        /// - Ongoing -> Aborted
1467        /// - Completed -> Aborted
1468        #[derive(Debug, PartialEq)]
1469        enum CodegenState {
1470            Ongoing,
1471            Completed,
1472            Aborted,
1473        }
1474        use CodegenState::*;
1475        let mut codegen_state = Ongoing;
1476
1477        // This is the queue of LLVM work items that still need processing.
1478        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
1479
1480        // These are the Jobserver Tokens we currently hold. Does not include
1481        // the implicit Token the compiler process owns no matter what.
1482        let mut tokens = Vec::new();
1483
1484        let mut main_thread_state = MainThreadState::Idle;
1485
1486        // How many LLVM worker threads are running while holding a Token. This
1487        // *excludes* any that the main thread is lending a Token to.
1488        let mut running_with_own_token = 0;
1489
1490        // How many LLVM worker threads are running in total. This *includes*
1491        // any that the main thread is lending a Token to.
1492        let running_with_any_token = |main_thread_state, running_with_own_token| {
1493            running_with_own_token
1494                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
1495        };
1496
1497        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
1498
1499        if let Some(allocator_module) = &mut allocator_module {
1500            B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config);
1501        }
1502
1503        // Run the message loop while there's still anything that needs message
1504        // processing. Note that as soon as codegen is aborted we simply want to
1505        // wait for all existing work to finish, so many of the conditions here
1506        // only apply if codegen hasn't been aborted as they represent pending
1507        // work to be done.
1508        loop {
1509            // While there are still CGUs to be codegened, the coordinator has
1510            // to decide how to utilize the compiler process's implicit Token:
1511            // for codegenning more CGUs or for running them through LLVM.
1512            if codegen_state == Ongoing {
1513                if main_thread_state == MainThreadState::Idle {
1514                    // Compute the number of workers that will be running once we've taken as many
1515                    // items from the work queue as we can, plus one for the main thread. It's not
1516                    // critically important that we use this instead of just
1517                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
1518                    // from fluctuating just because a worker finished up and we decreased the
1519                    // `running_with_own_token` count, even though we're just going to increase it
1520                    // right after this when we put a new worker to work.
1521                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
1522                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
1523                    let anticipated_running = running_with_own_token + additional_running + 1;
1524
1525                    if !queue_full_enough(work_items.len(), anticipated_running) {
1526                        // The queue is not full enough, process more codegen units:
1527                        if codegen_worker_send.send(CguMessage).is_err() {
1528                            panic!("Could not send CguMessage to main thread")
1529                        }
1530                        main_thread_state = MainThreadState::Codegenning;
1531                    } else {
1532                        // The queue is full enough to not let the worker
1533                        // threads starve. Use the implicit Token to do some
1534                        // LLVM work too.
1535                        let (item, _) =
1536                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
1537                        main_thread_state = MainThreadState::Lending;
1538                        spawn_work(
1539                            &cgcx,
1540                            &prof,
1541                            shared_emitter.clone(),
1542                            coordinator_send.clone(),
1543                            &mut llvm_start_time,
1544                            item,
1545                        );
1546                    }
1547                }
1548            } else if codegen_state == Completed {
1549                if running_with_any_token(main_thread_state, running_with_own_token) == 0
1550                    && work_items.is_empty()
1551                {
1552                    // All codegen work is done.
1553                    break;
1554                }
1555
1556                // In this branch, we know that everything has been codegened,
1557                // so it's just a matter of determining whether the implicit
1558                // Token is free to use for LLVM work.
1559                match main_thread_state {
1560                    MainThreadState::Idle => {
1561                        if let Some((item, _)) = work_items.pop() {
1562                            main_thread_state = MainThreadState::Lending;
1563                            spawn_work(
1564                                &cgcx,
1565                                &prof,
1566                                shared_emitter.clone(),
1567                                coordinator_send.clone(),
1568                                &mut llvm_start_time,
1569                                item,
1570                            );
1571                        } else {
1572                            // There is no unstarted work, so let the main thread
1573                            // take over for a running worker. Otherwise the
1574                            // implicit token would just go to waste.
1575                            // We reduce the `running` counter by one. The
1576                            // `tokens.truncate()` below will take care of
1577                            // giving the Token back.
1578                            assert!(running_with_own_token > 0);
1579                            running_with_own_token -= 1;
1580                            main_thread_state = MainThreadState::Lending;
1581                        }
1582                    }
1583                    MainThreadState::Codegenning => bug!(
1584                        "codegen worker should not be codegenning after \
1585                              codegen was already completed"
1586                    ),
1587                    MainThreadState::Lending => {
1588                        // Already making good use of that token
1589                    }
1590                }
1591            } else {
1592                // Don't queue up any more work if codegen was aborted, we're
1593                // just waiting for our existing children to finish.
1594                assert!(codegen_state == Aborted);
1595                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
1596                    break;
1597                }
1598            }
1599
1600            // Spin up what work we can, only doing this while we've got available
1601            // parallelism slots and work left to spawn.
1602            if codegen_state != Aborted {
1603                while running_with_own_token < tokens.len()
1604                    && let Some((item, _)) = work_items.pop()
1605                {
1606                    spawn_work(
1607                        &cgcx,
1608                        &prof,
1609                        shared_emitter.clone(),
1610                        coordinator_send.clone(),
1611                        &mut llvm_start_time,
1612                        item,
1613                    );
1614                    running_with_own_token += 1;
1615                }
1616            }
1617
1618            // Relinquish accidentally acquired extra tokens.
1619            tokens.truncate(running_with_own_token);
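            // (Dropping the truncated `Acquired` values is what actually
            // returns those tokens to the jobserver.)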
1620
1621            match coordinator_receive.recv().unwrap() {
1622                // Save the token locally and the next turn of the loop will use
1623                // this to spawn a new unit of work, or it may get dropped
1624                // immediately if we have no more work to spawn.
1625                Message::Token(token) => {
1626                    match token {
1627                        Ok(token) => {
1628                            tokens.push(token);
1629
1630                            if main_thread_state == MainThreadState::Lending {
1631                                // If the main thread token is used for LLVM work
1632                                // at the moment, we turn that thread into a regular
1633                                // LLVM worker thread, so the main thread is free
1634                                // to react to codegen demand.
1635                                main_thread_state = MainThreadState::Idle;
1636                                running_with_own_token += 1;
1637                            }
1638                        }
1639                        Err(e) => {
1640                            let msg = &format!("failed to acquire jobserver token: {e}");
1641                            shared_emitter.fatal(msg);
1642                            codegen_state = Aborted;
1643                        }
1644                    }
1645                }
1646
1647                Message::CodegenDone { llvm_work_item, cost } => {
1648                    // We keep the queue sorted by estimated processing cost,
1649                    // so that more expensive items are processed earlier. This
1650                    // is good for throughput as it gives the main thread more
1651                    // time to fill up the queue and it avoids scheduling
1652                    // expensive items to the end.
1653                    // Note, however, that this is not ideal for memory
1654                    // consumption, as LLVM module sizes are not evenly
1655                    // distributed.
1656                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
1657                    let insertion_index = match insertion_index {
1658                        Ok(idx) | Err(idx) => idx,
1659                    };
1660                    work_items.insert(insertion_index, (llvm_work_item, cost));
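                    // Illustrative: with queued costs [1, 4, 9], inserting a
                    // module of cost 6 yields [1, 4, 6, 9]; `pop()` then takes
                    // 9 first, so the most expensive module starts earliest.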
1661
1662                    if cgcx.parallel {
1663                        helper.request_token();
1664                    }
1665                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1666                    main_thread_state = MainThreadState::Idle;
1667                }
1668
1669                Message::CodegenComplete => {
1670                    if codegen_state != Aborted {
1671                        codegen_state = Completed;
1672                    }
1673                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1674                    main_thread_state = MainThreadState::Idle;
1675                }
1676
1677                // If codegen is aborted that means translation was aborted due
1678                // to some normal-ish compiler error. In this situation we want
1679                // to exit as soon as possible, but we want to make sure all
1680                // existing work has finished. Flag codegen as being done, and
1681                // then conditions above will ensure no more work is spawned but
1682                // we'll keep executing this loop until `running_with_own_token`
1683                // hits 0.
1684                Message::CodegenAborted => {
1685                    codegen_state = Aborted;
1686                }
1687
1688                Message::WorkItem { result } => {
1689                    // If a thread exits successfully then we drop a token associated
1690                    // with that worker and update our `running_with_own_token` count.
1691                    // We may later re-acquire a token to continue running more work.
1692                    // We may also not actually drop a token here if the worker was
1693                    // running with an "ephemeral token".
1694                    if main_thread_state == MainThreadState::Lending {
1695                        main_thread_state = MainThreadState::Idle;
1696                    } else {
1697                        running_with_own_token -= 1;
1698                    }
1699
1700                    match result {
1701                        Ok(WorkItemResult::Finished(compiled_module)) => {
1702                            compiled_modules.push(compiled_module);
1703                        }
1704                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
1705                            assert!(needs_thin_lto.is_empty());
1706                            needs_fat_lto.push(fat_lto_input);
1707                        }
1708                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
1709                            assert!(needs_fat_lto.is_empty());
1710                            needs_thin_lto.push(ThinLtoInput::Red {
1711                                name,
1712                                buffer: SerializedModule::Local(thin_buffer),
1713                            });
1714                        }
1715                        Err(Some(WorkerFatalError)) => {
1716                            // Like `CodegenAborted`, wait for remaining work to finish.
1717                            codegen_state = Aborted;
1718                        }
1719                        Err(None) => {
1720                            // If the thread failed that means it panicked, so
1721                            // we abort immediately.
1722                            bug!("worker thread panicked");
1723                        }
1724                    }
1725                }
1726
1727                Message::AddImportOnlyModule { bitcode_path, work_product } => {
1728                    assert_eq!(codegen_state, Ongoing);
1729                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
1730                    lto_import_only_modules.push((bitcode_path, work_product));
1731                    main_thread_state = MainThreadState::Idle;
1732                }
1733            }
1734        }
1735
1736        // Drop to print timings
1737        drop(llvm_start_time);
1738
1739        if codegen_state == Aborted {
1740            return Err(());
1741        }
1742
1743        drop(codegen_state);
1744        drop(tokens);
1745        drop(helper);
1746        assert!(work_items.is_empty());
1747
1748        if !needs_fat_lto.is_empty() {
1749            assert!(compiled_modules.is_empty());
1750            assert!(needs_thin_lto.is_empty());
1751
1752            if let Some(allocator_module) = allocator_module.take() {
1753                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
1754            }
1755
1756            for (bitcode_path, wp) in lto_import_only_modules {
1757                needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, bitcode_path })
1758            }
1759
1760            return Ok(MaybeLtoModules::FatLto {
1761                cgcx,
1762                exported_symbols_for_lto,
1763                each_linked_rlib_file_for_lto,
1764                needs_fat_lto,
1765            });
1766        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
1767            assert!(compiled_modules.is_empty());
1768            assert!(needs_fat_lto.is_empty());
1769
1770            for (bitcode_path, wp) in lto_import_only_modules {
1771                needs_thin_lto.push(ThinLtoInput::Green { wp, bitcode_path })
1772            }
1773
1774            if cgcx.lto == Lto::ThinLocal {
1775                compiled_modules.extend(do_thin_lto::<B>(
1776                    &cgcx,
1777                    &prof,
1778                    shared_emitter.clone(),
1779                    tm_factory,
1780                    exported_symbols_for_lto,
1781                    each_linked_rlib_file_for_lto,
1782                    needs_thin_lto,
1783                ));
1784            } else {
1785                if let Some(allocator_module) = allocator_module.take() {
1786                    let thin_buffer = B::serialize_module(allocator_module.module_llvm, true);
1787                    needs_thin_lto.push(ThinLtoInput::Red {
1788                        name: allocator_module.name,
1789                        buffer: SerializedModule::Local(thin_buffer),
1790                    });
1791                }
1792
1793                return Ok(MaybeLtoModules::ThinLto {
1794                    cgcx,
1795                    exported_symbols_for_lto,
1796                    each_linked_rlib_file_for_lto,
1797                    needs_thin_lto,
1798                });
1799            }
1800        }
1801
1802        Ok(MaybeLtoModules::NoLto(CompiledModules {
1803            modules: compiled_modules,
1804            allocator_module: allocator_module.map(|allocator_module| {
1805                B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config)
1806            }),
1807        }))
1808    };
1809    return std::thread::Builder::new()
1810        .name("coordinator".to_owned())
1811        .spawn(f)
1812        .expect("failed to spawn coordinator thread");
1813
1814    // A heuristic that determines if we have enough LLVM WorkItems in the
1815    // queue so that the main thread can do LLVM work instead of codegen
1816    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
1817        // This heuristic scales ahead-of-time codegen according to available
1818        // concurrency, as measured by `workers_running`. The idea is that the
1819        // more concurrency we have available, the more demand there will be for
1820        // work items, and the fuller the queue should be kept to meet demand.
1821        // An important property of this approach is that we codegen ahead of
1822        // time only as much as necessary, so as to keep fewer LLVM modules in
1823        // memory at once, thereby reducing memory consumption.
1824        //
1825        // When the number of workers running is less than the max concurrency
1826        // available to us, this heuristic can cause us to instruct the main
1827        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
1828        // of codegen, even though it seems like it *should* be codegenning so
1829        // that we can create more work items and spawn more LLVM workers.
1830        //
1831        // But this is not a problem. When the main thread is told to LLVM,
1832        // according to this heuristic and how work is scheduled, there is
1833        // always at least one item in the queue, and therefore at least one
1834        // pending jobserver token request. If there *is* more concurrency
1835        // available, we will immediately receive a token, which will upgrade
1836        // the main thread's LLVM worker to a real one (conceptually), and free
1837        // up the main thread to codegen if necessary. On the other hand, if
1838        // there isn't more concurrency, then the main thread working on an LLVM
1839        // item is appropriate, as long as the queue is full enough for demand.
1840        //
1841        // Speaking of which, how full should we keep the queue? Probably less
1842        // full than you'd think. A lot has to go wrong for the queue not to be
1843        // full enough and for that to have a negative effect on compile times.
1844        //
1845        // Workers are unlikely to finish at exactly the same time, so when one
1846        // finishes and takes another work item off the queue, we often have
1847        // ample time to codegen at that point before the next worker finishes.
1848        // But suppose that codegen takes so long that the workers exhaust the
1849        // queue, and we have one or more workers that have nothing to work on.
1850        // Well, it might not be so bad. Of all the LLVM modules we create and
1851        // optimize, one has to finish last. It's not necessarily the case that
1852        // by losing some concurrency for a moment, we delay the point at which
1853        // that last LLVM module is finished and the rest of compilation can
1854        // proceed. Also, when we can't take advantage of some concurrency, we
1855        // give tokens back to the job server. That enables some other rustc to
1856        // potentially make use of the available concurrency. That could even
1857        // *decrease* overall compile time if we're lucky. But yes, if no other
1858        // rustc can make use of the concurrency, then we've squandered it.
1859        //
1860        // However, keeping the queue full is also beneficial when we have a
1861        // surge in available concurrency. Then items can be taken from the
1862        // queue immediately, without having to wait for codegen.
1863        //
1864        // So, the heuristic below tries to keep one item in the queue for every
1865        // four running workers. Based on limited benchmarking, this appears to
1866        // be more than sufficient to avoid increasing compilation times.
1867        let quarter_of_workers = workers_running - 3 * workers_running / 4;
1868        items_in_queue > 0 && items_in_queue >= quarter_of_workers
1869    }
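    // Worked values for the heuristic above (illustrative arithmetic only, spelling
    // out the "one item per four workers" goal): `workers_running - 3 * workers_running / 4`
    // is the integer form of `ceil(workers_running / 4)`, so:
    //
    //   queue_full_enough(1, 1) == true    // 1 >= ceil(1/4) == 1
    //   queue_full_enough(1, 4) == true    // 1 >= ceil(4/4) == 1
    //   queue_full_enough(1, 5) == false   // 1 <  ceil(5/4) == 2
    //   queue_full_enough(3, 9) == true    // 3 >= ceil(9/4) == 3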
1870}
1871
1872/// `FatalError` is explicitly not `Send`.
1873#[must_use]
1874pub(crate) struct WorkerFatalError;
1875
1876fn spawn_work<'a, B: WriteBackendMethods>(
1877    cgcx: &CodegenContext,
1878    prof: &'a SelfProfilerRef,
1879    shared_emitter: SharedEmitter,
1880    coordinator_send: Sender<Message<B>>,
1881    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
1882    work: WorkItem<B>,
1883) {
1884    if llvm_start_time.is_none() {
1885        *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
1886    }
1887
1888    let cgcx = cgcx.clone();
1889    let prof = prof.clone();
1890
1891    let name = work.short_description();
1892    let f = move || {
1893        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };
1894
1895        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1896            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m),
1897            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
1898                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m),
1899            ),
1900        }));
1901
1902        let msg = match result {
1903            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },
1904
1905            // We ignore any `FatalError` coming out of `execute_work_item`, as a
1906            // diagnostic was already sent off to the main thread - just surface
1907            // that there was an error in this worker.
1908            Err(err) if err.is::<FatalErrorMarker>() => {
1909                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
1910            }
1911
1912            Err(_) => Message::WorkItem::<B> { result: Err(None) },
1913        };
1914        drop(coordinator_send.send(msg));
1915    };
1916    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
1917}
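
// The `catch_unwind` + payload-downcast idiom used in `spawn_work` above, restated
// as a self-contained sketch. `DemoFatalMarker` and `classify_unwind` are hypothetical
// names for illustration only; the real worker downcasts to `FatalErrorMarker`.
#[allow(dead_code)]
struct DemoFatalMarker;

#[allow(dead_code)]
fn classify_unwind(work: impl FnOnce()) -> &'static str {
    match std::panic::catch_unwind(AssertUnwindSafe(work)) {
        // The closure ran to completion.
        Ok(()) => "finished",
        // A "fatal" unwind: the diagnostic was already sent to the main thread,
        // so only the fact of failure needs to be surfaced, not the payload.
        Err(payload) if payload.is::<DemoFatalMarker>() => "fatal (already reported)",
        // Any other panic is unexpected.
        Err(_) => "unexpected panic",
    }
}
// e.g. `classify_unwind(|| std::panic::panic_any(DemoFatalMarker))` yields
// "fatal (already reported)", while a plain `panic!` yields "unexpected panic".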
1918
1919fn spawn_thin_lto_work<B: WriteBackendMethods>(
1920    cgcx: &CodegenContext,
1921    prof: &SelfProfilerRef,
1922    shared_emitter: SharedEmitter,
1923    tm_factory: TargetMachineFactoryFn<B>,
1924    coordinator_send: Sender<ThinLtoMessage>,
1925    work: ThinLtoWorkItem<B>,
1926) {
1927    let cgcx = cgcx.clone();
1928    let prof = prof.clone();
1929
1930    let name = work.short_description();
1931    let f = move || {
1932        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };
1933
1934        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
1935            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
1936                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m)
1937            }
1938            ThinLtoWorkItem::ThinLto(m) => {
1939                execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m)
1940            }
1941        }));
1942
1943        let msg = match result {
1944            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },
1945
1946            // We ignore any `FatalError` coming out of `execute_work_item`, as a
1947            // diagnostic was already sent off to the main thread - just surface
1948            // that there was an error in this worker.
1949            Err(err) if err.is::<FatalErrorMarker>() => {
1950                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
1951            }
1952
1953            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
1954        };
1955        drop(coordinator_send.send(msg));
1956    };
1957    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
1958}
1959
1960enum SharedEmitterMessage {
1961    Diagnostic(Diagnostic),
1962    InlineAsmError(InlineAsmError),
1963    Fatal(String),
1964}
1965
1966pub struct InlineAsmError {
1967    pub span: SpanData,
1968    pub msg: String,
1969    pub level: Level,
1970    pub source: Option<(String, Vec<InnerSpan>)>,
1971}
1972
1973#[derive(Clone)]
1974pub struct SharedEmitter {
1975    sender: Sender<SharedEmitterMessage>,
1976}
1977
1978pub struct SharedEmitterMain {
1979    receiver: Receiver<SharedEmitterMessage>,
1980}
1981
1982impl SharedEmitter {
1983    fn new() -> (SharedEmitter, SharedEmitterMain) {
1984        let (sender, receiver) = channel();
1985
1986        (SharedEmitter { sender }, SharedEmitterMain { receiver })
1987    }
1988
1989    pub fn inline_asm_error(&self, err: InlineAsmError) {
1990        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
1991    }
1992
1993    fn fatal(&self, msg: &str) {
1994        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
1995    }
1996}
1997
1998impl Emitter for SharedEmitter {
1999    fn emit_diagnostic(&mut self, mut diag: rustc_errors::DiagInner) {
2000        // Check that we aren't missing anything interesting when converting to
2001        // the cut-down local `DiagInner`.
2002        assert!(!diag.span.has_span_labels());
2003        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
2004        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
2005        assert_eq!(diag.is_lint, None);
2006        // No sensible check for `diag.emitted_at`.
2007
2008        let args = mem::replace(&mut diag.args, DiagArgMap::default());
2009        drop(
2010            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
2011                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
2012                level: diag.level(),
2013                messages: diag.messages,
2014                code: diag.code,
2015                children: diag
2016                    .children
2017                    .into_iter()
2018                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
2019                    .collect(),
2020                args,
2021            })),
2022        );
2023    }
2024
2025    fn source_map(&self) -> Option<&SourceMap> {
2026        None
2027    }
2028}
2029
2030impl SharedEmitterMain {
2031    fn check(&self, sess: &Session, blocking: bool) {
2032        loop {
2033            let message = if blocking {
2034                match self.receiver.recv() {
2035                    Ok(message) => Ok(message),
2036                    Err(_) => Err(()),
2037                }
2038            } else {
2039                match self.receiver.try_recv() {
2040                    Ok(message) => Ok(message),
2041                    Err(_) => Err(()),
2042                }
2043            };
2044
2045            match message {
2046                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
2047                    // The diagnostic has been received on the main thread.
2048                    // Convert it back to a full `Diagnostic` and emit.
2049                    let dcx = sess.dcx();
2050                    let mut d =
2051                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
2052                    d.span = MultiSpan::from_spans(
2053                        diag.span.into_iter().map(|span| span.span()).collect(),
2054                    );
2055                    d.code = diag.code; // may be `None`, that's ok
2056                    d.children = diag
2057                        .children
2058                        .into_iter()
2059                        .map(|sub| rustc_errors::Subdiag {
2060                            level: sub.level,
2061                            messages: sub.messages,
2062                            span: MultiSpan::new(),
2063                        })
2064                        .collect();
2065                    d.args = diag.args;
2066                    dcx.emit_diagnostic(d);
2067                    sess.dcx().abort_if_errors();
2068                }
2069                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
2070                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
2071                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
2072                    if !inner.span.is_dummy() {
2073                        err.span(inner.span.span());
2074                    }
2075
2076                    // Point to the generated assembly if it is available.
2077                    if let Some((buffer, spans)) = inner.source {
2078                        let source = sess
2079                            .source_map()
2080                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
2081                        let spans: Vec<_> = spans
2082                            .iter()
2083                            .map(|sp| {
2084                                Span::with_root_ctxt(
2085                                    source.normalized_byte_pos(sp.start as u32),
2086                                    source.normalized_byte_pos(sp.end as u32),
2087                                )
2088                            })
2089                            .collect();
2090                        err.span_note(spans, "instantiated into assembly here");
2091                    }
2092
2093                    err.emit();
2094                }
2095                Ok(SharedEmitterMessage::Fatal(msg)) => {
2096                    sess.dcx().fatal(msg);
2097                }
2098                Err(_) => {
2099                    break;
2100                }
2101            }
2102        }
2103    }
2104}
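
// The shape of the cross-thread diagnostics hand-off above, reduced to a self-contained
// sketch: workers send plain-data messages over an mpsc channel, and only the main
// thread, which owns the real diagnostics context, replays them. `DemoEmitterMessage`
// and `demo_emitter_round_trip` are hypothetical names for illustration only.
#[allow(dead_code)]
enum DemoEmitterMessage {
    Diagnostic(String),
    Fatal(String),
}

#[allow(dead_code)]
fn demo_emitter_round_trip() {
    let (sender, receiver) = channel::<DemoEmitterMessage>();

    let workers: Vec<_> = (0..4)
        .map(|i| {
            let sender = sender.clone();
            thread::spawn(move || {
                // A worker reports a problem without touching any session state.
                let _ = sender.send(DemoEmitterMessage::Diagnostic(format!("note from worker {i}")));
            })
        })
        .collect();

    // Drop the last sender so the receive loop ends once every worker has hung up,
    // mirroring the `Err(_) => break` arm of `SharedEmitterMain::check`.
    drop(sender);

    for msg in receiver {
        match msg {
            DemoEmitterMessage::Diagnostic(text) => eprintln!("diagnostic: {text}"),
            DemoEmitterMessage::Fatal(text) => eprintln!("fatal: {text}"),
        }
    }

    for worker in workers {
        let _ = worker.join();
    }
}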
2105
2106pub struct Coordinator<B: WriteBackendMethods> {
2107    sender: Sender<Message<B>>,
2108    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
2109    // Only used for the Message type.
2110    phantom: PhantomData<B>,
2111}
2112
2113impl<B: WriteBackendMethods> Coordinator<B> {
2114    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
2115        self.future.take().unwrap().join()
2116    }
2117}
2118
2119impl<B: WriteBackendMethods> Drop for Coordinator<B> {
2120    fn drop(&mut self) {
2121        if let Some(future) = self.future.take() {
2122            // If we haven't joined yet, signal to the coordinator that it should spawn no more
2123            // work, and wait for worker threads to finish.
2124            drop(self.sender.send(Message::CodegenAborted::<B>));
2125            drop(future.join());
2126        }
2127    }
2128}
2129
2130pub struct OngoingCodegen<B: WriteBackendMethods> {
2131    backend: B,
2132    output_filenames: Arc<OutputFilenames>,
2133    // Field order matters here: dropping `coordinator` first joins the coordinator thread
2134    // before the two fields below it drop and close channels that thread may still be
2135    // using. See `Coordinator`'s `Drop` implementation for more info.
2136    pub(crate) coordinator: Coordinator<B>,
2137    codegen_worker_receive: Receiver<CguMessage>,
2138    shared_emitter_main: SharedEmitterMain,
2139}
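
// Rust drops struct fields in declaration order, which is what the field-order comment
// in `OngoingCodegen` relies on. A standalone illustration with hypothetical types:
#[allow(dead_code)]
struct PrintOnDrop(&'static str);

impl Drop for PrintOnDrop {
    fn drop(&mut self) {
        eprintln!("dropping {}", self.0);
    }
}

#[allow(dead_code)]
struct DropOrderDemo {
    first: PrintOnDrop,  // dropped first, like `coordinator` above
    second: PrintOnDrop, // dropped second, like the receiver fields above
}
// Dropping a `DropOrderDemo` prints the `first` message before the `second`, so
// placing `coordinator` before the channel endpoints guarantees the coordinator
// thread is joined before the channels it uses are closed.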
2140
2141impl<B: WriteBackendMethods> OngoingCodegen<B> {
2142    pub fn join(self, sess: &Session) -> (CompiledModules, FxIndexMap<WorkProductId, WorkProduct>) {
2143        self.shared_emitter_main.check(sess, true);
2144
2145        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
2146            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
2147            Ok(Err(())) => {
2148                sess.dcx().abort_if_errors();
2149                panic!("expected abort due to worker thread errors")
2150            }
2151            Err(_) => {
2152                bug!("panic during codegen/LLVM phase");
2153            }
2154        });
2155
2156        sess.dcx().abort_if_errors();
2157
2158        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
2159
2160        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
2161        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
2162            MaybeLtoModules::NoLto(compiled_modules) => {
2163                drop(shared_emitter);
2164                compiled_modules
2165            }
2166            MaybeLtoModules::FatLto {
2167                cgcx,
2168                exported_symbols_for_lto,
2169                each_linked_rlib_file_for_lto,
2170                needs_fat_lto,
2171            } => {
2172                let tm_factory = self.backend.target_machine_factory(
2173                    sess,
2174                    cgcx.opt_level,
2175                    &cgcx.backend_features,
2176                );
2177
2178                CompiledModules {
2179                    modules: vec![do_fat_lto(
2180                        &cgcx,
2181                        &sess.prof,
2182                        shared_emitter,
2183                        tm_factory,
2184                        &exported_symbols_for_lto,
2185                        &each_linked_rlib_file_for_lto,
2186                        needs_fat_lto,
2187                    )],
2188                    allocator_module: None,
2189                }
2190            }
2191            MaybeLtoModules::ThinLto {
2192                cgcx,
2193                exported_symbols_for_lto,
2194                each_linked_rlib_file_for_lto,
2195                needs_thin_lto,
2196            } => {
2197                let tm_factory = self.backend.target_machine_factory(
2198                    sess,
2199                    cgcx.opt_level,
2200                    &cgcx.backend_features,
2201                );
2202
2203                CompiledModules {
2204                    modules: do_thin_lto::<B>(
2205                        &cgcx,
2206                        &sess.prof,
2207                        shared_emitter,
2208                        tm_factory,
2209                        exported_symbols_for_lto,
2210                        each_linked_rlib_file_for_lto,
2211                        needs_thin_lto,
2212                    ),
2213                    allocator_module: None,
2214                }
2215            }
2216        });
2217
2218        shared_emitter_main.check(sess, true);
2219
2220        sess.dcx().abort_if_errors();
2221
2222        let mut compiled_modules =
2223            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
2224
2225        // Regardless of what order these modules completed in, report them to
2226        // the backend in the same order every time to ensure that we're handing
2227        // out deterministic results.
2228        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
2229
2230        let work_products =
2231            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
2232        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
2233
2234        (compiled_modules, work_products)
2235    }
2236
2237    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
2238        self.wait_for_signal_to_codegen_item();
2239        self.check_for_errors(tcx.sess);
2240        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
2241    }
2242
2243    pub(crate) fn check_for_errors(&self, sess: &Session) {
2244        self.shared_emitter_main.check(sess, false);
2245    }
2246
2247    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
2248        match self.codegen_worker_receive.recv() {
2249            Ok(CguMessage) => {
2250                // Ok to proceed.
2251            }
2252            Err(_) => {
2253                // One of the LLVM threads must have panicked, fall through so
2254                // error handling can be reached.
2255            }
2256        }
2257    }
2258}
2259
2260pub(crate) fn submit_codegened_module_to_llvm<B: WriteBackendMethods>(
2261    coordinator: &Coordinator<B>,
2262    module: ModuleCodegen<B::Module>,
2263    cost: u64,
2264) {
2265    let llvm_work_item = WorkItem::Optimize(module);
2266    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
2267}
2268
2269pub(crate) fn submit_post_lto_module_to_llvm<B: WriteBackendMethods>(
2270    coordinator: &Coordinator<B>,
2271    module: CachedModuleCodegen,
2272) {
2273    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
2274    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
2275}
2276
2277pub(crate) fn submit_pre_lto_module_to_llvm<B: WriteBackendMethods>(
2278    tcx: TyCtxt<'_>,
2279    coordinator: &Coordinator<B>,
2280    module: CachedModuleCodegen,
2281) {
2282    let filename = pre_lto_bitcode_filename(&module.name);
2283    let bitcode_path = in_incr_comp_dir_sess(tcx.sess, &filename);
2284    // Schedule the module to be loaded
2285    drop(
2286        coordinator
2287            .sender
2288            .send(Message::AddImportOnlyModule::<B> { bitcode_path, work_product: module.source }),
2289    );
2290}
2291
2292fn pre_lto_bitcode_filename(module_name: &str) -> String {
2293    format!("{module_name}.{PRE_LTO_BC_EXT}")
2294}
2295
2296fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
2297    // This combination should never be true (it's not supported); if it is,
2298    // something is wrong with command-line argument validation.
2299    assert!(
2300        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
2301            && tcx.sess.target.is_like_windows
2302            && tcx.sess.opts.cg.prefer_dynamic)
2303    );
2304
2305    // We need to generate `__imp_` symbols if we are generating an rlib, or if one is
2306    // included indirectly via ThinLTO. In theory ThinLTO could resolve these itself,
2307    // but it currently does not do so.
2308    let can_have_static_objects =
2309        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);
2310
2311    tcx.sess.target.is_like_windows &&
2312    can_have_static_objects &&
2313    // ThinLTO can't handle this workaround in all cases, so we don't
2314    // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
2315    // dynamic linking when linker plugin LTO is enabled.
2316    !tcx.sess.opts.cg.linker_plugin_lto.enabled()
2317}
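
// Background for the check above (a summary under general Windows-linking knowledge,
// not taken from this file): a reference to a `dllimport` symbol `foo` compiles to an
// indirect access through a pointer named `__imp_foo`, normally supplied by the DLL's
// import library. When linking statically instead, the backend synthesizes matching
// `__imp_` stubs that simply point at the local definition, so objects compiled either
// way still link.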