use std::marker::PhantomData;
use std::panic::AssertUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::{assert_matches, fs, io, mem, str, thread};

use rustc_abi::Size;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::jobserver::{self, Acquired};
use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
use rustc_errors::emitter::Emitter;
use rustc_errors::{
    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
};
use rustc_fs_util::link_or_copy;
use rustc_hir::find_attr;
use rustc_incremental::{
    copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
};
use rustc_macros::{Decodable, Encodable};
use rustc_metadata::fs::copy_to_stdout;
use rustc_middle::bug;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_session::config::{
    self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes,
    SwitchWithOptPath,
};
use rustc_span::source_map::SourceMap;
use rustc_span::{FileName, InnerSpan, Span, SpanData};
use rustc_target::spec::{MergeFunctions, SanitizerSet};
use tracing::debug;

use crate::back::link::{self, ensure_removed};
use crate::back::lto::{self, SerializedModule, check_lto_allowed};
use crate::errors::ErrorCreatingRemarkDir;
use crate::traits::*;
use crate::{
    CachedModuleCodegen, CompiledModule, CompiledModules, CrateInfo, ModuleCodegen, ModuleKind,
    errors,
};

const PRE_LTO_BC_EXT: &str = "pre-lto.bc";

/// What kind of object file to emit.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum EmitObj {
    // No object file.
    None,

    // Just uncompressed llvm bitcode. Provides easy compatibility with
    // emscripten's ecc compiler, when used as the linker.
    Bitcode,

    // Object code, possibly augmented with a bitcode section.
    ObjectCode(BitcodeSection),
}

/// What kind of llvm bitcode section to embed in an object file.
#[derive(Clone, Copy, PartialEq, Encodable, Decodable)]
pub enum BitcodeSection {
    // No bitcode section.
    None,

    // A full, uncompressed bitcode section.
    Full,
}
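
// A minimal illustrative check (a sketch added for clarity, not part of the
// compiler's behavior): `EmitObj`'s derived `PartialEq` compares the embedded
// `BitcodeSection`, which is what `ModuleConfig::bitcode_needed` and
// `ModuleConfig::embed_bitcode` below rely on.
#[cfg(test)]
mod emit_obj_equality_tests {
    use super::*;

    #[test]
    fn bitcode_section_participates_in_equality() {
        assert!(EmitObj::ObjectCode(BitcodeSection::Full) == EmitObj::ObjectCode(BitcodeSection::Full));
        assert!(EmitObj::ObjectCode(BitcodeSection::Full) != EmitObj::ObjectCode(BitcodeSection::None));
        assert!(EmitObj::Bitcode != EmitObj::None);
    }
}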

/// Module-specific configuration for `optimize_and_codegen`.
#[derive(Encodable, Decodable)]
pub struct ModuleConfig {
    /// Names of additional optimization passes to run.
    pub passes: Vec<String>,
    /// Some(level) to optimize at a certain level, or None to run
    /// absolutely no optimizations (used for the allocator module).
    pub opt_level: Option<config::OptLevel>,

    pub pgo_gen: SwitchWithOptPath,
    pub pgo_use: Option<PathBuf>,
    pub pgo_sample_use: Option<PathBuf>,
    pub debug_info_for_profiling: bool,
    pub instrument_coverage: bool,

    pub sanitizer: SanitizerSet,
    pub sanitizer_recover: SanitizerSet,
    pub sanitizer_dataflow_abilist: Vec<String>,
    pub sanitizer_memory_track_origins: usize,

    // Flags indicating which outputs to produce.
    pub emit_pre_lto_bc: bool,
    pub emit_bc: bool,
    pub emit_ir: bool,
    pub emit_asm: bool,
    pub emit_obj: EmitObj,
    pub emit_thin_lto_summary: bool,

    // Miscellaneous flags. These are mostly copied from command-line
    // options.
    pub verify_llvm_ir: bool,
    pub lint_llvm_ir: bool,
    pub no_prepopulate_passes: bool,
    pub no_builtins: bool,
    pub vectorize_loop: bool,
    pub vectorize_slp: bool,
    pub merge_functions: bool,
    pub emit_lifetime_markers: bool,
    pub llvm_plugins: Vec<String>,
    pub autodiff: Vec<config::AutoDiff>,
    pub offload: Vec<config::Offload>,
}

impl ModuleConfig {
    fn new(kind: ModuleKind, tcx: TyCtxt<'_>, no_builtins: bool) -> ModuleConfig {
        // If it's a regular module, use `$regular`, otherwise use `$other`.
        // `$regular` and `$other` are evaluated lazily.
        macro_rules! if_regular {
            ($regular: expr, $other: expr) => {
                if let ModuleKind::Regular = kind { $regular } else { $other }
            };
        }

        let sess = tcx.sess;
        let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);

        let save_temps = sess.opts.cg.save_temps;

        let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
            || match kind {
                ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
                ModuleKind::Allocator => false,
            };

        let emit_obj = if !should_emit_obj {
            EmitObj::None
        } else if sess.target.obj_is_bitcode
            || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
        {
            // This case is selected if the target uses objects as bitcode, or
            // if linker plugin LTO is enabled. In the linker plugin LTO case
            // the assumption is that the final link-step will read the bitcode
            // and convert it to object code. This may be done by either the
            // native linker or rustc itself.
            //
            // Note, however, that the linker-plugin-lto requested here is
            // explicitly ignored for `#![no_builtins]` crates. These crates are
            // specifically ignored by rustc's LTO passes and wouldn't work if
            // loaded into the linker. These crates define symbols that LLVM
            // lowers intrinsics to, and these symbol dependencies aren't known
            // until after codegen. As a result any crate marked
            // `#![no_builtins]` is assumed to not participate in LTO and
            // instead goes on to generate object code.
            EmitObj::Bitcode
        } else if need_bitcode_in_object(tcx) || sess.target.requires_lto {
            EmitObj::ObjectCode(BitcodeSection::Full)
        } else {
            EmitObj::ObjectCode(BitcodeSection::None)
        };

        ModuleConfig {
            passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),

            opt_level: opt_level_and_size,

            pgo_gen: if_regular!(
                sess.opts.cg.profile_generate.clone(),
                SwitchWithOptPath::Disabled
            ),
            pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
            pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
            debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
            instrument_coverage: if_regular!(sess.instrument_coverage(), false),

            sanitizer: if_regular!(sess.sanitizers(), SanitizerSet::empty()),
            sanitizer_dataflow_abilist: if_regular!(
                sess.opts.unstable_opts.sanitizer_dataflow_abilist.clone(),
                Vec::new()
            ),
            sanitizer_recover: if_regular!(
                sess.opts.unstable_opts.sanitizer_recover,
                SanitizerSet::empty()
            ),
            sanitizer_memory_track_origins: if_regular!(
                sess.opts.unstable_opts.sanitizer_memory_track_origins,
                0
            ),

            emit_pre_lto_bc: if_regular!(
                save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
                false
            ),
            emit_bc: if_regular!(
                save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
                save_temps
            ),
            emit_ir: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
                false
            ),
            emit_asm: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::Assembly),
                false
            ),
            emit_obj,
            emit_thin_lto_summary: if_regular!(
                sess.opts.output_types.contains_key(&OutputType::ThinLinkBitcode),
                false
            ),

            verify_llvm_ir: sess.verify_llvm_ir(),
            lint_llvm_ir: sess.opts.unstable_opts.lint_llvm_ir,
            no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
            no_builtins: no_builtins || sess.target.no_builtins,

            // Copy what clang does by turning on loop vectorization at O2 and
            // slp vectorization at O3.
            vectorize_loop: !sess.opts.cg.no_vectorize_loops
                && (sess.opts.optimize == config::OptLevel::More
                    || sess.opts.optimize == config::OptLevel::Aggressive),
            vectorize_slp: !sess.opts.cg.no_vectorize_slp
                && sess.opts.optimize == config::OptLevel::Aggressive,

            // Some targets (namely, NVPTX) interact badly with the
            // MergeFunctions pass. This is because MergeFunctions can generate
            // new function calls which may interfere with the target calling
            // convention; e.g. for the NVPTX target, PTX kernels should not
            // call other PTX kernels. MergeFunctions can also be configured to
            // generate aliases instead, but aliases are not supported by some
            // backends (again, NVPTX). Therefore, allow targets to opt out of
            // the MergeFunctions pass, but otherwise keep the pass enabled (at
            // O2 and O3) since it can be useful for reducing code size.
            merge_functions: match sess
                .opts
                .unstable_opts
                .merge_functions
                .unwrap_or(sess.target.merge_functions)
            {
                MergeFunctions::Disabled => false,
                MergeFunctions::Trampolines | MergeFunctions::Aliases => {
                    use config::OptLevel::*;
                    match sess.opts.optimize {
                        Aggressive | More | SizeMin | Size => true,
                        Less | No => false,
                    }
                }
            },

            emit_lifetime_markers: sess.emit_lifetime_markers(),
            llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
            autodiff: if_regular!(sess.opts.unstable_opts.autodiff.clone(), vec![]),
            offload: if_regular!(sess.opts.unstable_opts.offload.clone(), vec![]),
        }
    }

    pub fn bitcode_needed(&self) -> bool {
        self.emit_bc
            || self.emit_thin_lto_summary
            || self.emit_obj == EmitObj::Bitcode
            || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }

    pub fn embed_bitcode(&self) -> bool {
        self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
    }
}

/// Configuration passed to the function returned by the `target_machine_factory`.
pub struct TargetMachineFactoryConfig {
    /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
    /// so the path to the dwarf object has to be provided when we create the target machine.
    /// This can be ignored by backends which do not need it for their Split DWARF support.
    pub split_dwarf_file: Option<PathBuf>,

    /// The name of the output object file. Used for setting OutputFilenames in target options
    /// so that LLVM can emit the CodeView S_OBJNAME record in pdb files.
    pub output_obj_file: Option<PathBuf>,
}

impl TargetMachineFactoryConfig {
    pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig {
        let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
            cgcx.output_filenames.split_dwarf_path(
                cgcx.split_debuginfo,
                cgcx.split_dwarf_kind,
                module_name,
                cgcx.invocation_temp.as_deref(),
            )
        } else {
            None
        };

        let output_obj_file = Some(cgcx.output_filenames.temp_path_for_cgu(
            OutputType::Object,
            module_name,
            cgcx.invocation_temp.as_deref(),
        ));
        TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
    }
}

pub type TargetMachineFactoryFn<B> = Arc<
    dyn Fn(
            DiagCtxtHandle<'_>,
            TargetMachineFactoryConfig,
        ) -> <B as WriteBackendMethods>::TargetMachine
        + Send
        + Sync,
>;

/// Additional resources used by optimize_and_codegen (not module specific)
#[derive(Clone, Encodable, Decodable)]
pub struct CodegenContext {
    // Resources needed when running LTO
    pub lto: Lto,
    pub use_linker_plugin_lto: bool,
    pub dylib_lto: bool,
    pub prefer_dynamic: bool,
    pub save_temps: bool,
    pub fewer_names: bool,
    pub time_trace: bool,
    pub crate_types: Vec<CrateType>,
    pub output_filenames: Arc<OutputFilenames>,
    pub invocation_temp: Option<String>,
    pub module_config: Arc<ModuleConfig>,
    pub opt_level: OptLevel,
    pub backend_features: Vec<String>,
    pub msvc_imps_needed: bool,
    pub is_pe_coff: bool,
    pub target_can_use_split_dwarf: bool,
    pub target_arch: String,
    pub target_is_like_darwin: bool,
    pub target_is_like_aix: bool,
    pub target_is_like_gpu: bool,
    pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
    pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
    pub pointer_size: Size,

    /// LLVM optimizations for which we want to print remarks.
    pub remark: Passes,
    /// Directory into which the LLVM optimization remarks should be written.
    /// If `None`, they will be written to stderr.
    pub remark_dir: Option<PathBuf>,
    /// The incremental compilation session directory, or `None` if we are not
    /// compiling incrementally.
    pub incr_comp_session_dir: Option<PathBuf>,
    /// `true` if the codegen should be run in parallel.
    ///
    /// Depends on [`ExtraBackendMethods::supports_parallel()`] and `-Zno_parallel_backend`.
    pub parallel: bool,
}

fn generate_thin_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    dcx: DiagCtxtHandle<'_>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_thin_lto: Vec<ThinLtoInput<B>>,
) -> Vec<(ThinLtoWorkItem<B>, u64)> {
    let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work");

    let (lto_modules, copy_jobs) = B::run_thin_lto(
        cgcx,
        prof,
        dcx,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_thin_lto,
    );
    lto_modules
        .into_iter()
        .map(|module| {
            let cost = module.cost();
            (ThinLtoWorkItem::ThinLto(module), cost)
        })
        .chain(copy_jobs.into_iter().map(|wp| {
            (
                ThinLtoWorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
                    name: wp.cgu_name.clone(),
                    source: wp,
                }),
                0, // copying is very cheap
            )
        }))
        .collect()
}

enum MaybeLtoModules<B: WriteBackendMethods> {
    NoLto(CompiledModules),
    FatLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_fat_lto: Vec<FatLtoInput<B>>,
    },
    ThinLto {
        cgcx: CodegenContext,
        exported_symbols_for_lto: Arc<Vec<String>>,
        each_linked_rlib_file_for_lto: Vec<PathBuf>,
        needs_thin_lto: Vec<ThinLtoInput<B>>,
    },
}

fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
    let sess = tcx.sess;
    sess.opts.cg.embed_bitcode
        && tcx.crate_types().contains(&CrateType::Rlib)
        && sess.opts.output_types.contains_key(&OutputType::Exe)
}

fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
    if sess.opts.incremental.is_none() {
        return false;
    }

    match sess.lto() {
        Lto::No => false,
        Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
    }
}

pub(crate) fn start_async_codegen<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    allocator_module: Option<ModuleCodegen<B::Module>>,
) -> OngoingCodegen<B> {
    let (coordinator_send, coordinator_receive) = channel();

    let no_builtins = find_attr!(tcx, crate, NoBuiltins);

    let regular_config = ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins);
    let allocator_config = ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins);

    let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
    let (codegen_worker_send, codegen_worker_receive) = channel();

    let coordinator_thread = start_executing_work(
        backend.clone(),
        tcx,
        crate_info,
        shared_emitter,
        codegen_worker_send,
        coordinator_receive,
        Arc::new(regular_config),
        Arc::new(allocator_config),
        allocator_module,
        coordinator_send.clone(),
    );

    OngoingCodegen {
        backend,

        codegen_worker_receive,
        shared_emitter_main,
        coordinator: Coordinator {
            sender: coordinator_send,
            future: Some(coordinator_thread),
            phantom: PhantomData,
        },
        output_filenames: Arc::clone(tcx.output_filenames(())),
    }
}

fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
    sess: &Session,
    compiled_modules: &CompiledModules,
) -> FxIndexMap<WorkProductId, WorkProduct> {
    let mut work_products = FxIndexMap::default();

    if sess.opts.incremental.is_none() {
        return work_products;
    }

    let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");

    for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
        let mut files = Vec::new();
        if let Some(object_file_path) = &module.object {
            files.push((OutputType::Object.extension(), object_file_path.as_path()));
        }
        if let Some(dwarf_object_file_path) = &module.dwarf_object {
            files.push(("dwo", dwarf_object_file_path.as_path()));
        }
        if let Some(path) = &module.assembly {
            files.push((OutputType::Assembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.llvm_ir {
            files.push((OutputType::LlvmAssembly.extension(), path.as_path()));
        }
        if let Some(path) = &module.bytecode {
            files.push((OutputType::Bitcode.extension(), path.as_path()));
        }
        if let Some((id, product)) = copy_cgu_workproduct_to_incr_comp_cache_dir(
            sess,
            &module.name,
            files.as_slice(),
            &module.links_from_incr_cache,
        ) {
            work_products.insert(id, product);
        }
    }

    work_products
}

pub fn produce_final_output_artifacts(
    sess: &Session,
    compiled_modules: &CompiledModules,
    crate_output: &OutputFilenames,
) {
    let mut user_wants_bitcode = false;
    let mut user_wants_objects = false;

    // Produce final compile outputs.
    let copy_gracefully = |from: &Path, to: &OutFileName| match to {
        OutFileName::Stdout if let Err(e) = copy_to_stdout(from) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, to.as_path(), e));
        }
        OutFileName::Real(path) if let Err(e) = fs::copy(from, path) => {
            sess.dcx().emit_err(errors::CopyPath::new(from, path, e));
        }
        _ => {}
    };

    let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
        if let [module] = &compiled_modules.modules[..] {
            // 1) Only one codegen unit. In this case it's no difficulty
            //    to copy `foo.0.x` to `foo.x`.
            let path = crate_output.temp_path_for_cgu(
                output_type,
                &module.name,
                sess.invocation_temp.as_deref(),
            );
            let output = crate_output.path(output_type);
            if !output_type.is_text_output() && output.is_tty() {
                sess.dcx()
                    .emit_err(errors::BinaryOutputToTty { shorthand: output_type.shorthand() });
            } else {
                copy_gracefully(&path, &output);
            }
            if !sess.opts.cg.save_temps && !keep_numbered {
                // The user just wants `foo.x`, not `foo.#module-name#.x`.
                ensure_removed(sess.dcx(), &path);
            }
        } else {
            if crate_output.outputs.contains_explicit_name(&output_type) {
                // 2) Multiple codegen units, with `--emit foo=some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx()
                    .emit_warn(errors::IgnoringEmitPath { extension: output_type.extension() });
            } else if crate_output.single_output_file.is_some() {
                // 3) Multiple codegen units, with `-o some_name`. We have
                //    no good solution for this case, so warn the user.
                sess.dcx().emit_warn(errors::IgnoringOutput { extension: output_type.extension() });
            } else {
                // 4) Multiple codegen units, but no explicit name. We
                //    just leave the `foo.0.x` files in place.
                // (We don't have to do any work in this case.)
            }
        }
    };

    // Flag to indicate whether the user explicitly requested bitcode.
    // Otherwise, we produced it only as a temporary output, and will need
    // to get rid of it.
    for output_type in crate_output.outputs.keys() {
        match *output_type {
            OutputType::Bitcode => {
                user_wants_bitcode = true;
                // Copy to .bc, but always keep the .0.bc. There is a later
                // check to figure out if we should delete .0.bc files, or keep
                // them for making an rlib.
                copy_if_one_unit(OutputType::Bitcode, true);
            }
            OutputType::ThinLinkBitcode => {
                copy_if_one_unit(OutputType::ThinLinkBitcode, false);
            }
            OutputType::LlvmAssembly => {
                copy_if_one_unit(OutputType::LlvmAssembly, false);
            }
            OutputType::Assembly => {
                copy_if_one_unit(OutputType::Assembly, false);
            }
            OutputType::Object => {
                user_wants_objects = true;
                copy_if_one_unit(OutputType::Object, true);
            }
            OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
        }
    }

    // Clean up unwanted temporary files.

    // We create the following files by default:
    //  - #crate#.#module-name#.rcgu.bc
    //  - #crate#.#module-name#.rcgu.o
    //  - #crate#.o (linked from crate.##.rcgu.o)
    //  - #crate#.bc (copied from crate.##.rcgu.bc)
    // We may create additional files if requested by the user (through
    // `-C save-temps` or `--emit=` flags).

    if !sess.opts.cg.save_temps {
        // Remove the temporary .#module-name#.rcgu.o objects. If the user didn't
        // explicitly request bitcode (with --emit=bc), and the bitcode is not
        // needed for building an rlib, then we must remove .#module-name#.bc as
        // well.

        // Specific rules for keeping .#module-name#.rcgu.bc:
        //  - If the user requested bitcode (`user_wants_bitcode`), and
        //    codegen_units > 1, then keep it.
        //  - If the user requested bitcode but codegen_units == 1, then we
        //    can toss .#module-name#.rcgu.bc because we copied it to .bc earlier.
        //  - If we're not building an rlib and the user didn't request
        //    bitcode, then delete .#module-name#.rcgu.bc.
        // If you change how this works, also update back::link::link_rlib,
        // where .#module-name#.rcgu.bc files are (maybe) deleted after making an
        // rlib.
        let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);

        let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1;

        let keep_numbered_objects =
            needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1);

        for module in compiled_modules.modules.iter() {
            if !keep_numbered_objects {
                if let Some(ref path) = module.object {
                    ensure_removed(sess.dcx(), path);
                }

                if let Some(ref path) = module.dwarf_object {
                    ensure_removed(sess.dcx(), path);
                }
            }

            if let Some(ref path) = module.bytecode {
                if !keep_numbered_bitcode {
                    ensure_removed(sess.dcx(), path);
                }
            }
        }

        if !user_wants_bitcode
            && let Some(ref allocator_module) = compiled_modules.allocator_module
            && let Some(ref path) = allocator_module.bytecode
        {
            ensure_removed(sess.dcx(), path);
        }
    }

    if sess.opts.json_artifact_notifications {
        if let [module] = &compiled_modules.modules[..] {
            module.for_each_output(|_path, ty| {
                if sess.opts.output_types.contains_key(&ty) {
                    let descr = ty.shorthand();
                    // For a single CGU the file was renamed to drop the
                    // CGU-specific suffix, so regenerate the path the same way.
                    let path = crate_output.path(ty);
                    sess.dcx().emit_artifact_notification(path.as_path(), descr);
                }
            });
        } else {
            for module in &compiled_modules.modules {
                module.for_each_output(|path, ty| {
                    if sess.opts.output_types.contains_key(&ty) {
                        let descr = ty.shorthand();
                        sess.dcx().emit_artifact_notification(&path, descr);
                    }
                });
            }
        }
    }

    // We leave the following files around by default:
    //  - #crate#.o
    //  - #crate#.bc
    // These are used in linking steps and will be cleaned up afterward.
}

pub(crate) enum WorkItem<B: WriteBackendMethods> {
    /// Optimize a newly codegened, totally unoptimized module.
    Optimize(ModuleCodegen<B::Module>),
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
}

enum ThinLtoWorkItem<B: WriteBackendMethods> {
    /// Copy the post-LTO artifacts from the incremental cache to the output
    /// directory.
    CopyPostLtoArtifacts(CachedModuleCodegen),
    /// Performs thin-LTO on the given module.
    ThinLto(lto::ThinModule<B>),
}

// `pthread_setname()` on *nix ignores anything beyond the first 15
// bytes. Use short descriptions to maximize the space available for
// the module name.
#[cfg(not(windows))]
fn desc(short: &str, _long: &str, name: &str) -> String {
    // The short label is three bytes, and is followed by a space. That
    // leaves 11 bytes for the CGU name. How we obtain those 11 bytes
    // depends on the CGU name form.
    //
    // - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
    //   before the `-cgu.0` is the same for every CGU, so use the
    //   `cgu.0` part. The number suffix will be different for each
    //   CGU.
    //
    // - Incremental (normal), e.g. `2i52vvl2hco29us0`: use the whole
    //   name because each CGU will have a unique ASCII hash, and the
    //   first 11 bytes will be enough to identify it.
    //
    // - Incremental (with `-Zhuman-readable-cgu-names`), e.g.
    //   `regex.f10ba03eb5ec7975-re_builder.volatile`: use the whole
    //   name. The first 11 bytes won't be enough to uniquely identify
    //   it, but no obvious substring will, and this is a rarely used
    //   option so it doesn't matter much.
    //
    assert_eq!(short.len(), 3);
    let name = if let Some(index) = name.find("-cgu.") {
        &name[index + 1..] // +1 skips the leading '-'.
    } else {
        name
    };
    format!("{short} {name}")
}

// Windows has no thread name length limit, so use more descriptive names.
#[cfg(windows)]
fn desc(_short: &str, long: &str, name: &str) -> String {
    format!("{long} {name}")
}
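
// An illustrative sketch (added for clarity; the expected strings follow
// directly from the comments above): on non-Windows hosts `desc` keeps only
// the `cgu.N` suffix of a non-incremental CGU name so that the label plus
// the name fit within the 15-byte `pthread_setname()` limit.
#[cfg(all(test, not(windows)))]
mod desc_tests {
    use super::desc;

    #[test]
    fn non_incremental_cgu_names_are_shortened() {
        // "opt" (3 bytes) + " " + "cgu.0" is well within 15 bytes.
        assert_eq!(desc("opt", "optimize module", "regex.f10ba03eb5ec7975-cgu.0"), "opt cgu.0");
        // Incremental hash-style names have no `-cgu.` part and are kept whole.
        assert_eq!(desc("opt", "optimize module", "2i52vvl2hco29us0"), "opt 2i52vvl2hco29us0");
    }
}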

impl<B: WriteBackendMethods> WorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
            WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
        }
    }
}

impl<B: WriteBackendMethods> ThinLtoWorkItem<B> {
    /// Generate a short description of this work item suitable for use as a thread name.
    fn short_description(&self) -> String {
        match self {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                desc("cpy", "copy LTO artifacts for", &m.name)
            }
            ThinLtoWorkItem::ThinLto(m) => desc("lto", "thin-LTO module", m.name()),
        }
    }
}

/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
    /// The backend has finished compiling a CGU, nothing more required.
    Finished(CompiledModule),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// fat LTO.
    NeedsFatLto(FatLtoInput<B>),

    /// The backend has finished compiling a CGU, which now needs to go through
    /// thin LTO.
    NeedsThinLto(String, B::ModuleBuffer),
}

pub enum FatLtoInput<B: WriteBackendMethods> {
    Serialized { name: String, bitcode_path: PathBuf },
    InMemory(ModuleCodegen<B::Module>),
}

pub enum ThinLtoInput<B: WriteBackendMethods> {
    Red { name: String, buffer: SerializedModule<B::ModuleBuffer> },
    Green { wp: WorkProduct, bitcode_path: PathBuf },
}

/// Actual LTO type we end up choosing based on multiple factors.
pub(crate) enum ComputedLtoType {
    No,
    Thin,
    Fat,
}

pub(crate) fn compute_per_cgu_lto_type(
    sess_lto: &Lto,
    linker_does_lto: bool,
    sess_crate_types: &[CrateType],
) -> ComputedLtoType {
    // If the linker does LTO, we don't have to do it. Note that we
    // keep doing full LTO, if it is requested, so as not to break the
    // assumption that the output will be a single module.

    // We ignore a request for full crate graph LTO if the crate type
    // is only an rlib, as there is no full crate graph to process,
    // that'll happen later.
    //
    // This use case currently comes up primarily for targets that
    // require LTO so the request for LTO is always unconditionally
    // passed down to the backend, but we don't actually want to do
    // anything about it yet until we've got a final product.
    let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);

    match sess_lto {
        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
        Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
        Lto::Fat if !is_rlib => ComputedLtoType::Fat,
        _ => ComputedLtoType::No,
    }
}
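
// A small sanity-check sketch of the decision table above (added for
// illustration; `Lto` and `CrateType` are the `rustc_session` types imported
// at the top of this file).
#[cfg(test)]
mod per_cgu_lto_type_tests {
    use super::*;

    #[test]
    fn rlib_only_crates_defer_full_lto() {
        // A fat LTO request is deferred when the only crate type is an rlib:
        // there is no full crate graph to process yet.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Fat, false, &[CrateType::Rlib]),
            ComputedLtoType::No
        ));
        // With a final artifact such as an executable, fat LTO proceeds.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Fat, false, &[CrateType::Executable]),
            ComputedLtoType::Fat
        ));
        // If the linker performs LTO itself, thin LTO is skipped entirely.
        assert!(matches!(
            compute_per_cgu_lto_type(&Lto::Thin, true, &[CrateType::Executable]),
            ComputedLtoType::No
        ));
    }
}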

fn execute_optimize_work_item<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    mut module: ModuleCodegen<B::Module>,
) -> WorkItemResult<B> {
    let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);

    B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config);

    // After we've done the initial round of optimizations we need to
    // decide whether to synchronously codegen this module or ship it
    // back to the coordinator thread for further LTO processing (which
    // has to wait for all the initial modules to be optimized).

    let lto_type =
        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);

    // If we're doing some form of incremental LTO then we need to be sure to
    // save our module to disk first.
    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
        let filename = pre_lto_bitcode_filename(&module.name);
        cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
    } else {
        None
    };

    match lto_type {
        ComputedLtoType::No => {
            let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config);
            WorkItemResult::Finished(module)
        }
        ComputedLtoType::Thin => {
            let thin_buffer = B::serialize_module(module.module_llvm, true);
            if let Some(path) = bitcode {
                fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
            }
            WorkItemResult::NeedsThinLto(module.name, thin_buffer)
        }
        ComputedLtoType::Fat => match bitcode {
            Some(path) => {
                let buffer = B::serialize_module(module.module_llvm, false);
                fs::write(&path, buffer.data()).unwrap_or_else(|e| {
                    panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
                });
                WorkItemResult::NeedsFatLto(FatLtoInput::Serialized {
                    name: module.name,
                    bitcode_path: path,
                })
            }
            None => WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module)),
        },
    }
}

fn execute_copy_from_cache_work_item(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    module: CachedModuleCodegen,
) -> CompiledModule {
    let _timer =
        prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);

    let dcx = DiagCtxt::new(Box::new(shared_emitter));
    let dcx = dcx.handle();

    let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();

    let mut links_from_incr_cache = Vec::new();

    let mut load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
        let source_file = in_incr_comp_dir(incr_comp_session_dir, saved_path);
        debug!(
            "copying preexisting module `{}` from {:?} to {}",
            module.name,
            source_file,
            output_path.display()
        );
        match link_or_copy(&source_file, &output_path) {
            Ok(_) => {
                links_from_incr_cache.push(source_file);
                Some(output_path)
            }
            Err(error) => {
                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
                None
            }
        }
    };

    let dwarf_object =
        module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
            let dwarf_obj_out = cgcx
                .output_filenames
                .split_dwarf_path(
                    cgcx.split_debuginfo,
                    cgcx.split_dwarf_kind,
                    &module.name,
                    cgcx.invocation_temp.as_deref(),
                )
                .expect(
                    "saved dwarf object in work product but `split_dwarf_path` returned `None`",
                );
            load_from_incr_comp_dir(dwarf_obj_out, saved_dwarf_object_file)
        });

    let mut load_from_incr_cache = |perform, output_type: OutputType| {
        if perform {
            let saved_file = module.source.saved_files.get(output_type.extension())?;
            let output_path = cgcx.output_filenames.temp_path_for_cgu(
                output_type,
                &module.name,
                cgcx.invocation_temp.as_deref(),
            );
            load_from_incr_comp_dir(output_path, &saved_file)
        } else {
            None
        }
    };

    let module_config = &cgcx.module_config;
    let should_emit_obj = module_config.emit_obj != EmitObj::None;
    let assembly = load_from_incr_cache(module_config.emit_asm, OutputType::Assembly);
    let llvm_ir = load_from_incr_cache(module_config.emit_ir, OutputType::LlvmAssembly);
    let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
    let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
    if should_emit_obj && object.is_none() {
        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
    }

    CompiledModule {
        links_from_incr_cache,
        kind: ModuleKind::Regular,
        name: module.name,
        object,
        dwarf_object,
        bytecode,
        assembly,
        llvm_ir,
    }
}

fn do_fat_lto<B: WriteBackendMethods>(
    sess: &Session,
    cgcx: &CodegenContext,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    exported_symbols_for_lto: &[String],
    each_linked_rlib_for_lto: &[PathBuf],
    needs_fat_lto: Vec<FatLtoInput<B>>,
) -> CompiledModule {
    let _timer = sess.prof.verbose_generic_activity("LLVM_fatlto");

    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
    let dcx = dcx.handle();

    check_lto_allowed(&cgcx, dcx);

    B::optimize_and_codegen_fat_lto(
        sess,
        cgcx,
        &shared_emitter,
        tm_factory,
        exported_symbols_for_lto,
        each_linked_rlib_for_lto,
        needs_fat_lto,
    )
}
989990fn do_thin_lto<B: WriteBackendMethods>(
991 cgcx: &CodegenContext,
992 prof: &SelfProfilerRef,
993 shared_emitter: SharedEmitter,
994 tm_factory: TargetMachineFactoryFn<B>,
995 exported_symbols_for_lto: Arc<Vec<String>>,
996 each_linked_rlib_for_lto: Vec<PathBuf>,
997 needs_thin_lto: Vec<ThinLtoInput<B>>,
998) -> Vec<CompiledModule> {
999let _timer = prof.verbose_generic_activity("LLVM_thinlto");
10001001let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
1002let dcx = dcx.handle();
10031004check_lto_allowed(&cgcx, dcx);
10051006let (coordinator_send, coordinator_receive) = channel();
10071008// First up, convert our jobserver into a helper thread so we can use normal
1009 // mpsc channels to manage our messages and such.
1010 // After we've requested tokens then we'll, when we can,
1011 // get tokens on `coordinator_receive` which will
1012 // get managed in the main loop below.
1013let coordinator_send2 = coordinator_send.clone();
1014let helper = jobserver::client()
1015 .into_helper_thread(move |token| {
1016drop(coordinator_send2.send(ThinLtoMessage::Token(token)));
1017 })
1018 .expect("failed to spawn helper thread");
10191020let mut work_items = ::alloc::vec::Vec::new()vec![];
10211022// We have LTO work to do. Perform the serial work here of
1023 // figuring out what we're going to LTO and then push a
1024 // bunch of work items onto our queue to do LTO. This all
1025 // happens on the coordinator thread but it's very quick so
1026 // we don't worry about tokens.
1027for (work, cost) in generate_thin_lto_work::<B>(
1028 cgcx,
1029 prof,
1030 dcx,
1031&exported_symbols_for_lto,
1032&each_linked_rlib_for_lto,
1033 needs_thin_lto,
1034 ) {
1035let insertion_index =
1036 work_items.binary_search_by_key(&cost, |&(_, cost)| cost).unwrap_or_else(|e| e);
1037 work_items.insert(insertion_index, (work, cost));
1038if cgcx.parallel {
1039 helper.request_token();
1040 }
1041 }
10421043let mut codegen_aborted = None;
10441045// These are the Jobserver Tokens we currently hold. Does not include
1046 // the implicit Token the compiler process owns no matter what.
1047let mut tokens = ::alloc::vec::Vec::new()vec![];
10481049// Amount of tokens that are used (including the implicit token).
1050let mut used_token_count = 0;
10511052let mut compiled_modules = ::alloc::vec::Vec::new()vec![];
10531054// Run the message loop while there's still anything that needs message
1055 // processing. Note that as soon as codegen is aborted we simply want to
1056 // wait for all existing work to finish, so many of the conditions here
1057 // only apply if codegen hasn't been aborted as they represent pending
1058 // work to be done.
1059loop {
1060if codegen_aborted.is_none() {
1061if used_token_count == 0 && work_items.is_empty() {
1062// All codegen work is done.
1063break;
1064 }
10651066// Spin up what work we can, only doing this while we've got available
1067 // parallelism slots and work left to spawn.
1068while used_token_count < tokens.len() + 1
1069&& let Some((item, _)) = work_items.pop()
1070 {
1071 spawn_thin_lto_work(
1072&cgcx,
1073 prof,
1074 shared_emitter.clone(),
1075 Arc::clone(&tm_factory),
1076 coordinator_send.clone(),
1077 item,
1078 );
1079 used_token_count += 1;
1080 }
1081 } else {
1082// Don't queue up any more work if codegen was aborted, we're
1083 // just waiting for our existing children to finish.
1084if used_token_count == 0 {
1085break;
1086 }
1087 }
10881089// Relinquish accidentally acquired extra tokens. Subtract 1 for the implicit token.
1090tokens.truncate(used_token_count.saturating_sub(1));

        match coordinator_receive.recv().unwrap() {
            // Save the token locally and the next turn of the loop will use
            // this to spawn a new unit of work, or it may get dropped
            // immediately if we have no more work to spawn.
            ThinLtoMessage::Token(token) => match token {
                Ok(token) => {
                    tokens.push(token);
                }
                Err(e) => {
                    let msg = &format!("failed to acquire jobserver token: {e}");
                    shared_emitter.fatal(msg);
                    codegen_aborted = Some(FatalError);
                }
            },

            ThinLtoMessage::WorkItem { result } => {
                // If a thread exits successfully then we drop a token associated
                // with that worker and update our `used_token_count` count.
                // We may later re-acquire a token to continue running more work.
                // We may also not actually drop a token here if the worker was
                // running with an "ephemeral token".
                used_token_count -= 1;

                match result {
                    Ok(compiled_module) => compiled_modules.push(compiled_module),
                    Err(Some(WorkerFatalError)) => {
                        // Like `CodegenAborted`, wait for remaining work to finish.
                        codegen_aborted = Some(FatalError);
                    }
                    Err(None) => {
                        // If the thread failed that means it panicked, so
                        // we abort immediately.
                        bug!("worker thread panicked");
                    }
                }
            }
        }
    }

    if let Some(codegen_aborted) = codegen_aborted {
        codegen_aborted.raise();
    }

    compiled_modules
}

fn execute_thin_lto_work_item<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    module: lto::ThinModule<B>,
) -> CompiledModule {
    let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());

    B::optimize_and_codegen_thin(cgcx, prof, &shared_emitter, tm_factory, module)
}

/// Messages sent to the coordinator.
pub(crate) enum Message<B: WriteBackendMethods> {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<WorkItemResult<B>, Option<WorkerFatalError>> },

    /// The frontend has finished generating something (backend IR or a
    /// post-LTO artifact) for a codegen unit, and it should be passed to the
    /// backend. Sent from the main thread.
    CodegenDone { llvm_work_item: WorkItem<B>, cost: u64 },

    /// Similar to `CodegenDone`, but for reusing a pre-LTO artifact.
    /// Sent from the main thread.
    AddImportOnlyModule { bitcode_path: PathBuf, work_product: WorkProduct },

    /// The frontend has finished generating everything for all codegen units.
    /// Sent from the main thread.
    CodegenComplete,

    /// Some normal-ish compiler error occurred, and codegen should be wound
    /// down. Sent from the main thread.
    CodegenAborted,
}

/// Messages sent to the coordinator.
pub(crate) enum ThinLtoMessage {
    /// A jobserver token has become available. Sent from the jobserver helper
    /// thread.
    Token(io::Result<Acquired>),

    /// The backend has finished processing a work item for a codegen unit.
    /// Sent from a backend worker thread.
    WorkItem { result: Result<CompiledModule, Option<WorkerFatalError>> },
}

/// A message sent from the coordinator thread to the main thread telling it to
/// process another codegen unit.
pub struct CguMessage;

// A cut-down version of `rustc_errors::DiagInner` that impls `Send`, which
// can be used to send diagnostics from codegen threads to the main thread.
// It's missing the following fields from `rustc_errors::DiagInner`.
// - `span`: it doesn't impl `Send`.
// - `suggestions`: it doesn't impl `Send`, and isn't used for codegen
//   diagnostics.
// - `sort_span`: it doesn't impl `Send`.
// - `is_lint`: lints aren't relevant during codegen.
// - `emitted_at`: not used for codegen diagnostics.
struct Diagnostic {
    span: Vec<SpanData>,
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
    code: Option<ErrCode>,
    children: Vec<Subdiagnostic>,
    args: DiagArgMap,
}

// A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
// missing the following fields from `rustc_errors::Subdiag`.
// - `span`: it doesn't impl `Send`.
struct Subdiagnostic {
    level: Level,
    messages: Vec<(DiagMessage, Style)>,
}

#[derive(PartialEq, Clone, Copy, Debug)]
enum MainThreadState {
    /// Doing nothing.
    Idle,

    /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
    Codegenning,

    /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
    Lending,
}

fn start_executing_work<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    crate_info: &CrateInfo,
    shared_emitter: SharedEmitter,
    codegen_worker_send: Sender<CguMessage>,
    coordinator_receive: Receiver<Message<B>>,
    regular_config: Arc<ModuleConfig>,
    allocator_config: Arc<ModuleConfig>,
    mut allocator_module: Option<ModuleCodegen<B::Module>>,
    coordinator_send: Sender<Message<B>>,
) -> thread::JoinHandle<Result<MaybeLtoModules<B>, ()>> {
    let sess = tcx.sess;
    let prof = sess.prof.clone();

    let mut each_linked_rlib_for_lto = Vec::new();
    let mut each_linked_rlib_file_for_lto = Vec::new();
    if sess.lto() != Lto::No && sess.lto() != Lto::ThinLocal {
        drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
            if link::ignored_for_lto(sess, crate_info, cnum) {
                return;
            }

            each_linked_rlib_for_lto.push(cnum);
            each_linked_rlib_file_for_lto.push(path.to_path_buf());
        }));
    }

    // Compute the set of symbols we need to retain when doing LTO (if we need to)
    let exported_symbols_for_lto =
        Arc::new(lto::exported_symbols_for_lto(tcx, &each_linked_rlib_for_lto));

    // First up, convert our jobserver into a helper thread so we can use
    // normal mpsc channels to manage our messages and such. Once we've
    // requested tokens, they will arrive on `coordinator_receive` as they
    // become available and get managed in the main loop below.
    let coordinator_send2 = coordinator_send.clone();
    let helper = jobserver::client()
        .into_helper_thread(move |token| {
            drop(coordinator_send2.send(Message::Token::<B>(token)));
        })
        .expect("failed to spawn helper thread");

    let opt_level = tcx.backend_optimization_level(());
    let backend_features = tcx.global_backend_features(()).clone();
    let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features);

    let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir {
        let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize());
        match result {
            Ok(dir) => Some(dir),
            Err(error) => sess.dcx().emit_fatal(ErrorCreatingRemarkDir { error }),
        }
    } else {
        None
    };

    let cgcx = CodegenContext {
        crate_types: tcx.crate_types().to_vec(),
        lto: sess.lto(),
        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
        dylib_lto: sess.opts.unstable_opts.dylib_lto,
        prefer_dynamic: sess.opts.cg.prefer_dynamic,
        fewer_names: sess.fewer_names(),
        save_temps: sess.opts.cg.save_temps,
        time_trace: sess.opts.unstable_opts.llvm_time_trace,
        remark: sess.opts.cg.remark.clone(),
        remark_dir,
        incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
        output_filenames: Arc::clone(tcx.output_filenames(())),
        module_config: regular_config,
        opt_level,
        backend_features,
        msvc_imps_needed: msvc_imps_needed(tcx),
        is_pe_coff: tcx.sess.target.is_like_windows,
        target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
        target_arch: tcx.sess.target.arch.to_string(),
        target_is_like_darwin: tcx.sess.target.is_like_darwin,
        target_is_like_aix: tcx.sess.target.is_like_aix,
        target_is_like_gpu: tcx.sess.target.is_like_gpu,
        split_debuginfo: tcx.sess.split_debuginfo(),
        split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
        parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend,
        pointer_size: tcx.data_layout.pointer_size(),
        invocation_temp: sess.invocation_temp.clone(),
    };

    // This is the "main loop" of parallel work happening for parallel codegen.
    // It's here that we manage parallelism, schedule work, and work with
    // messages coming from clients.
    //
    // There are a few environmental pre-conditions that shape how the system
    // is set up:
    //
    // - Error reporting can only happen on the main thread because that's the
    //   only place where we have access to the compiler `Session`.
    // - LLVM work can be done on any thread.
    // - Codegen can only happen on the main thread.
    // - Each thread doing substantial work must be in possession of a `Token`
    //   from the `Jobserver`.
    // - The compiler process always holds one `Token`. Any additional `Tokens`
    //   have to be requested from the `Jobserver`.
    //
    // Error Reporting
    // ===============
    // The error reporting restriction is handled separately from the rest: We
    // set up a `SharedEmitter` that holds an open channel to the main thread.
    // When an error occurs on any thread, the shared emitter will send the
    // error message to the receiver main thread (`SharedEmitterMain`). The
    // main thread will periodically query this error message queue and emit
    // any error messages it has received. It might even abort compilation if
    // it has received a fatal error. In this case we rely on all other threads
    // being torn down automatically with the main thread.
    // Since the main thread will often be busy doing codegen work, error
    // reporting will be somewhat delayed, since the message queue can only be
    // checked in between two work packages.
    //
    // Work Processing Infrastructure
    // ==============================
    // The work processing infrastructure knows three major actors:
    //
    // - the coordinator thread,
    // - the main thread, and
    // - LLVM worker threads
    //
    // The coordinator thread is running a message loop. It instructs the main
    // thread about what work to do when, and it will spawn off LLVM worker
    // threads as open LLVM WorkItems become available.
    //
    // The job of the main thread is to codegen CGUs into LLVM work packages
    // (since the main thread is the only thread that can do this). The main
    // thread will block until it receives a message from the coordinator, upon
    // which it will codegen one CGU, send it to the coordinator and block
    // again. This way the coordinator can control what the main thread is
    // doing.
    //
    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
    // available, it will spawn off a new LLVM worker thread and let it process
    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
    // it will just shut down, which also frees all resources associated with
    // the given LLVM module, and sends a message to the coordinator that the
    // WorkItem has been completed.
    //
    // Work Scheduling
    // ===============
    // The scheduler's goal is to minimize the time it takes to complete all
    // work there is; however, we also want to keep memory consumption low
    // if possible. These two goals are at odds with each other: If memory
    // consumption were not an issue, we could just let the main thread produce
    // LLVM WorkItems at full speed, assuring maximal utilization of
    // Tokens/LLVM worker threads. However, since codegen is usually faster
    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
    // WorkItem potentially holds on to a substantial amount of memory.
    //
    // So the actual goal is to produce just enough LLVM WorkItems so as not
    // to starve our LLVM worker threads. That means, once we have enough
    // WorkItems in our queue, we can block the main thread, so it does not
    // produce more until we need them.
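    //
    // (Concretely, the main thread blocks in `wait_for_signal_to_codegen_item`
    // until the coordinator sends it a `CguMessage`; see `OngoingCodegen`
    // below.)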
    //
    // Doing LLVM Work on the Main Thread
    // ----------------------------------
    // Since the main thread owns the compiler process's implicit `Token`, it is
    // wasteful to keep it blocked without doing any work. Therefore, what we do
    // in this case is: We spawn off an additional LLVM worker thread that helps
    // reduce the queue. The work it is doing corresponds to the implicit
    // `Token`. The coordinator will mark the main thread as being busy with
    // LLVM work. (The actual work happens on another OS thread but we just care
    // about `Tokens`, not actual threads).
    //
    // When any LLVM worker thread finishes while the main thread is marked as
    // "busy with LLVM work", we can do a little switcheroo: We give the Token
    // of the just finished thread to the LLVM worker thread that is working on
    // behalf of the main thread's implicit Token, thus freeing up the main
    // thread again. The coordinator can then again decide what the main thread
    // should do. This allows the coordinator to make decisions at more points
    // in time.
    //
    // Striking a Balance between Throughput and Memory Consumption
    // ------------------------------------------------------------
    // Since our two goals, (1) use as many Tokens as possible and (2) keep
    // memory consumption as low as possible, are in conflict with each other,
    // we have to find a trade-off between them. Right now, the goal is to keep
    // all workers busy, which means that no worker should find the queue empty
    // when it is ready to start.
    // How do we achieve this? Good question :) We actually never know how
    // many `Tokens` are potentially available so it's hard to say how much to
    // fill up the queue before switching the main thread to LLVM work. Also we
    // currently don't have a means to estimate how long a running LLVM worker
    // will still be busy with its current WorkItem. However, we know the
    // maximal count of available Tokens that makes sense (=the number of CPU
    // cores), so we can take a conservative guess. The heuristic we use here
    // is implemented in the `queue_full_enough()` function.
    //
    // Some Background on Jobservers
    // -----------------------------
    // It's worth also touching on the management of parallelism here. We don't
    // want to just spawn a thread per work item because while that's optimal
    // parallelism it may overload a system with too many threads or violate our
    // configuration for the maximum amount of CPU to use for this process. To
    // manage this we use the `jobserver` crate.
    //
    // Job servers are an artifact of GNU make and are used to manage
    // parallelism between processes. A jobserver is basically a glorified IPC
    // semaphore. Whenever we want to run some work we acquire the semaphore,
    // and whenever we're done with that work we release the semaphore. In this
    // manner we can ensure that the maximum number of parallel workers is
    // capped at any one point in time.
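    //
    // For example (illustrative, not tied to any particular build system):
    // under `make -jN`, GNU make exposes a shared token pool to its whole
    // process tree; each process holds one implicit token and must acquire an
    // extra token from the pool for every additional parallel job it runs,
    // returning it when done.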
    //
    // LTO and the coordinator thread
    // ------------------------------
    //
    // The final job the coordinator thread is responsible for is managing LTO
    // and how that works. When LTO is requested, what we'll do is collect all
    // optimized LLVM modules into a local vector on the coordinator. Once all
    // modules have been codegened and optimized we hand this to the `lto`
    // module for further optimization. The `lto` module will return a list
    // of more modules to work on, which the coordinator will continue to spawn
    // work for.
    //
    // Each LLVM module is automatically sent back to the coordinator for LTO if
    // necessary. There are already optimizations in place to avoid sending work
    // back to the coordinator if LTO isn't requested.
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };

        // This is where we collect codegen units that have gone all the way
        // through codegen and LLVM.
        let mut compiled_modules = vec![];
        let mut needs_fat_lto = Vec::new();
        let mut needs_thin_lto = Vec::new();
        let mut lto_import_only_modules = Vec::new();

        /// Possible state transitions:
        /// - Ongoing -> Completed
        /// - Ongoing -> Aborted
        /// - Completed -> Aborted
        #[derive(Debug, PartialEq)]
        enum CodegenState {
            Ongoing,
            Completed,
            Aborted,
        }
        use CodegenState::*;
        let mut codegen_state = Ongoing;

        // This is the queue of LLVM work items that still need processing.
        let mut work_items = Vec::<(WorkItem<B>, u64)>::new();

        // These are the Jobserver Tokens we currently hold. Does not include
        // the implicit Token the compiler process owns no matter what.
        let mut tokens = Vec::new();

        let mut main_thread_state = MainThreadState::Idle;

        // How many LLVM worker threads are running while holding a Token. This
        // *excludes* any that the main thread is lending a Token to.
        let mut running_with_own_token = 0;

        // How many LLVM worker threads are running in total. This *includes*
        // any that the main thread is lending a Token to.
        let running_with_any_token = |main_thread_state, running_with_own_token| {
            running_with_own_token
                + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
        };

        let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;

        if let Some(allocator_module) = &mut allocator_module {
            B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config);
        }

        // Run the message loop while there's still anything that needs message
        // processing. Note that as soon as codegen is aborted we simply want to
        // wait for all existing work to finish, so many of the conditions here
        // only apply if codegen hasn't been aborted as they represent pending
        // work to be done.
        loop {
            // While there are still CGUs to be codegened, the coordinator has
            // to decide how to utilize the compiler process's implicit Token:
            // for codegenning more CGUs or for running them through LLVM.
            if codegen_state == Ongoing {
                if main_thread_state == MainThreadState::Idle {
                    // Compute the number of workers that will be running once we've taken as many
                    // items from the work queue as we can, plus one for the main thread. It's not
                    // critically important that we use this instead of just
                    // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
                    // from fluctuating just because a worker finished up and we decreased the
                    // `running_with_own_token` count, even though we're just going to increase it
                    // right after this when we put a new worker to work.
                    let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
                    let additional_running = std::cmp::min(extra_tokens, work_items.len());
                    let anticipated_running = running_with_own_token + additional_running + 1;
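                    // (Illustrative numbers: with 2 workers running, 4 tokens
                    // held, and 5 items queued, this anticipates
                    // 2 + min(4 - 2, 5) + 1 = 5 running workers.)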

                    if !queue_full_enough(work_items.len(), anticipated_running) {
                        // The queue is not full enough, process more codegen units:
                        if codegen_worker_send.send(CguMessage).is_err() {
                            panic!("Could not send CguMessage to main thread")
                        }
                        main_thread_state = MainThreadState::Codegenning;
                    } else {
                        // The queue is full enough to not let the worker
                        // threads starve. Use the implicit Token to do some
                        // LLVM work too.
                        let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                        main_thread_state = MainThreadState::Lending;
                        spawn_work(
                            &cgcx,
                            &prof,
                            shared_emitter.clone(),
                            coordinator_send.clone(),
                            &mut llvm_start_time,
                            item,
                        );
                    }
                }
            } else if codegen_state == Completed {
                if running_with_any_token(main_thread_state, running_with_own_token) == 0
                    && work_items.is_empty()
                {
                    // All codegen work is done.
                    break;
                }

                // In this branch, we know that everything has been codegened,
                // so it's just a matter of determining whether the implicit
                // Token is free to use for LLVM work.
                match main_thread_state {
                    MainThreadState::Idle => {
                        if let Some((item, _)) = work_items.pop() {
                            main_thread_state = MainThreadState::Lending;
                            spawn_work(
                                &cgcx,
                                &prof,
                                shared_emitter.clone(),
                                coordinator_send.clone(),
                                &mut llvm_start_time,
                                item,
                            );
                        } else {
                            // There is no unstarted work, so let the main thread
                            // take over for a running worker. Otherwise the
                            // implicit token would just go to waste.
                            // We reduce the `running` counter by one. The
                            // `tokens.truncate()` below will take care of
                            // giving the Token back.
                            assert!(running_with_own_token > 0);
                            running_with_own_token -= 1;
                            main_thread_state = MainThreadState::Lending;
                        }
                    }
                    MainThreadState::Codegenning => bug!(
                        "codegen worker should not be codegenning after \
                         codegen was already completed"
                    ),
                    MainThreadState::Lending => {
                        // Already making good use of that token
                    }
                }
            } else {
                // Don't queue up any more work if codegen was aborted, we're
                // just waiting for our existing children to finish.
                assert!(codegen_state == Aborted);
                if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
                    break;
                }
            }

            // Spin up what work we can, only doing this while we've got available
            // parallelism slots and work left to spawn.
            if codegen_state != Aborted {
                while running_with_own_token < tokens.len()
                    && let Some((item, _)) = work_items.pop()
                {
                    spawn_work(
                        &cgcx,
                        &prof,
                        shared_emitter.clone(),
                        coordinator_send.clone(),
                        &mut llvm_start_time,
                        item,
                    );
                    running_with_own_token += 1;
                }
            }

            // Relinquish accidentally acquired extra tokens.
            tokens.truncate(running_with_own_token);

            match coordinator_receive.recv().unwrap() {
                // Save the token locally and the next turn of the loop will use
                // this to spawn a new unit of work, or it may get dropped
                // immediately if we have no more work to spawn.
                Message::Token(token) => {
                    match token {
                        Ok(token) => {
                            tokens.push(token);

                            if main_thread_state == MainThreadState::Lending {
                                // If the main thread token is used for LLVM work
                                // at the moment, we turn that thread into a regular
                                // LLVM worker thread, so the main thread is free
                                // to react to codegen demand.
                                main_thread_state = MainThreadState::Idle;
                                running_with_own_token += 1;
                            }
                        }
                        Err(e) => {
                            let msg = &format!("failed to acquire jobserver token: {e}");
                            shared_emitter.fatal(msg);
                            codegen_state = Aborted;
                        }
                    }
                }

                Message::CodegenDone { llvm_work_item, cost } => {
                    // We keep the queue sorted by estimated processing cost,
                    // so that more expensive items are processed earlier. This
                    // is good for throughput as it gives the main thread more
                    // time to fill up the queue and it avoids scheduling
                    // expensive items to the end.
                    // Note, however, that this is not ideal for memory
                    // consumption, as LLVM module sizes are not evenly
                    // distributed.
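                    // `binary_search_by_key` returns `Ok(idx)` if an item with
                    // an equal cost already exists at `idx`, and `Err(idx)`
                    // with the insertion point otherwise; either way `idx` is
                    // a position that keeps the queue sorted.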
                    let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
                    let insertion_index = match insertion_index {
                        Ok(idx) | Err(idx) => idx,
                    };
                    work_items.insert(insertion_index, (llvm_work_item, cost));

                    if cgcx.parallel {
                        helper.request_token();
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                Message::CodegenComplete => {
                    if codegen_state != Aborted {
                        codegen_state = Completed;
                    }
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    main_thread_state = MainThreadState::Idle;
                }

                // If codegen is aborted that means translation was aborted due
                // to some normal-ish compiler error. In this situation we want
                // to exit as soon as possible, but we want to make sure all
                // existing work has finished. Flag codegen as being done, and
                // then conditions above will ensure no more work is spawned but
                // we'll keep executing this loop until `running_with_own_token`
                // hits 0.
                Message::CodegenAborted => {
                    codegen_state = Aborted;
                }

                Message::WorkItem { result } => {
                    // If a thread exits successfully then we drop a token associated
                    // with that worker and update our `running_with_own_token` count.
                    // We may later re-acquire a token to continue running more work.
                    // We may also not actually drop a token here if the worker was
                    // running with an "ephemeral token".
                    if main_thread_state == MainThreadState::Lending {
                        main_thread_state = MainThreadState::Idle;
                    } else {
                        running_with_own_token -= 1;
                    }

                    match result {
                        Ok(WorkItemResult::Finished(compiled_module)) => {
                            compiled_modules.push(compiled_module);
                        }
                        Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
                            assert!(needs_thin_lto.is_empty());
                            needs_fat_lto.push(fat_lto_input);
                        }
                        Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
                            assert!(needs_fat_lto.is_empty());
                            needs_thin_lto.push(ThinLtoInput::Red {
                                name,
                                buffer: SerializedModule::Local(thin_buffer),
                            });
                        }
                        Err(Some(WorkerFatalError)) => {
                            // Like `CodegenAborted`, wait for remaining work to finish.
                            codegen_state = Aborted;
                        }
                        Err(None) => {
                            // If the thread failed that means it panicked, so
                            // we abort immediately.
                            bug!("worker thread panicked");
                        }
                    }
                }

                Message::AddImportOnlyModule { bitcode_path, work_product } => {
                    assert_eq!(codegen_state, Ongoing);
                    assert_eq!(main_thread_state, MainThreadState::Codegenning);
                    lto_import_only_modules.push((bitcode_path, work_product));
                    main_thread_state = MainThreadState::Idle;
                }
            }
        }

        // Drop to print timings
        drop(llvm_start_time);

        if codegen_state == Aborted {
            return Err(());
        }

        drop(codegen_state);
        drop(tokens);
        drop(helper);
        assert!(work_items.is_empty());

        if !needs_fat_lto.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_thin_lto.is_empty());

            if let Some(allocator_module) = allocator_module.take() {
                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
            }

            for (bitcode_path, wp) in lto_import_only_modules {
                needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, bitcode_path })
            }

            return Ok(MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
            });
        } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
            assert!(compiled_modules.is_empty());
            assert!(needs_fat_lto.is_empty());

            for (bitcode_path, wp) in lto_import_only_modules {
                needs_thin_lto.push(ThinLtoInput::Green { wp, bitcode_path })
            }

            if cgcx.lto == Lto::ThinLocal {
                compiled_modules.extend(do_thin_lto::<B>(
                    &cgcx,
                    &prof,
                    shared_emitter.clone(),
                    tm_factory,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                ));
            } else {
                if let Some(allocator_module) = allocator_module.take() {
                    let thin_buffer = B::serialize_module(allocator_module.module_llvm, true);
                    needs_thin_lto.push(ThinLtoInput::Red {
                        name: allocator_module.name,
                        buffer: SerializedModule::Local(thin_buffer),
                    });
                }

                return Ok(MaybeLtoModules::ThinLto {
                    cgcx,
                    exported_symbols_for_lto,
                    each_linked_rlib_file_for_lto,
                    needs_thin_lto,
                });
            }
        }

        Ok(MaybeLtoModules::NoLto(CompiledModules {
            modules: compiled_modules,
            allocator_module: allocator_module.map(|allocator_module| {
                B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config)
            }),
        }))
    };

    return std::thread::Builder::new()
        .name("coordinator".to_owned())
        .spawn(f)
        .expect("failed to spawn coordinator thread");

    // A heuristic that determines if we have enough LLVM WorkItems in the
    // queue so that the main thread can do LLVM work instead of codegen
    fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
        // This heuristic scales ahead-of-time codegen according to available
        // concurrency, as measured by `workers_running`. The idea is that the
        // more concurrency we have available, the more demand there will be for
        // work items, and the fuller the queue should be kept to meet demand.
        // An important property of this approach is that we codegen ahead of
        // time only as much as necessary, so as to keep fewer LLVM modules in
        // memory at once, thereby reducing memory consumption.
        //
        // When the number of workers running is less than the max concurrency
        // available to us, this heuristic can cause us to instruct the main
        // thread to work on an LLVM item (that is, tell it to "LLVM") instead
        // of codegen, even though it seems like it *should* be codegenning so
        // that we can create more work items and spawn more LLVM workers.
        //
        // But this is not a problem. When the main thread is told to LLVM,
        // according to this heuristic and how work is scheduled, there is
        // always at least one item in the queue, and therefore at least one
        // pending jobserver token request. If there *is* more concurrency
        // available, we will immediately receive a token, which will upgrade
        // the main thread's LLVM worker to a real one (conceptually), and free
        // up the main thread to codegen if necessary. On the other hand, if
        // there isn't more concurrency, then the main thread working on an LLVM
        // item is appropriate, as long as the queue is full enough for demand.
        //
        // Speaking of which, how full should we keep the queue? Probably less
        // full than you'd think. A lot has to go wrong for the queue not to be
        // full enough and for that to have a negative effect on compile times.
        //
        // Workers are unlikely to finish at exactly the same time, so when one
        // finishes and takes another work item off the queue, we often have
        // ample time to codegen at that point before the next worker finishes.
        // But suppose that codegen takes so long that the workers exhaust the
        // queue, and we have one or more workers that have nothing to work on.
        // Well, it might not be so bad. Of all the LLVM modules we create and
        // optimize, one has to finish last. It's not necessarily the case that
        // by losing some concurrency for a moment, we delay the point at which
        // that last LLVM module is finished and the rest of compilation can
        // proceed. Also, when we can't take advantage of some concurrency, we
        // give tokens back to the job server. That enables some other rustc to
        // potentially make use of the available concurrency. That could even
        // *decrease* overall compile time if we're lucky. But yes, if no other
        // rustc can make use of the concurrency, then we've squandered it.
        //
        // However, keeping the queue full is also beneficial when we have a
        // surge in available concurrency. Then items can be taken from the
        // queue immediately, without having to wait for codegen.
        //
        // So, the heuristic below tries to keep one item in the queue for every
        // four running workers. Based on limited benchmarking, this appears to
        // be more than sufficient to avoid increasing compilation times.
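        //
        // For example: with 8 running workers, `quarter_of_workers` below is
        // 8 - 3 * 8 / 4 = 2 (the integer arithmetic makes this one quarter,
        // rounded up), so the queue counts as full enough once it holds at
        // least 2 items.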
        let quarter_of_workers = workers_running - 3 * workers_running / 4;
        items_in_queue > 0 && items_in_queue >= quarter_of_workers
    }
}

/// `FatalError` is explicitly not `Send`.
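/// This unit struct is a `Send`-able stand-in that worker threads use to
/// report a fatal error back to the coordinator across a channel.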
#[must_use]
pub(crate) struct WorkerFatalError;

fn spawn_work<'a, B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &'a SelfProfilerRef,
    shared_emitter: SharedEmitter,
    coordinator_send: Sender<Message<B>>,
    llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
    work: WorkItem<B>,
) {
    if llvm_start_time.is_none() {
        *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
    }

    let cgcx = cgcx.clone();
    let prof = prof.clone();

    let name = work.short_description();
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };

        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m),
            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m),
            ),
        }));

        let msg = match result {
            Ok(result) => Message::WorkItem::<B> { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                Message::WorkItem::<B> { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => Message::WorkItem::<B> { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    };
    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
}

fn spawn_thin_lto_work<B: WriteBackendMethods>(
    cgcx: &CodegenContext,
    prof: &SelfProfilerRef,
    shared_emitter: SharedEmitter,
    tm_factory: TargetMachineFactoryFn<B>,
    coordinator_send: Sender<ThinLtoMessage>,
    work: ThinLtoWorkItem<B>,
) {
    let cgcx = cgcx.clone();
    let prof = prof.clone();

    let name = work.short_description();
    let f = move || {
        let _profiler = if cgcx.time_trace { B::thread_profiler() } else { Box::new(()) };

        let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
                execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m)
            }
            ThinLtoWorkItem::ThinLto(m) => {
                execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m)
            }
        }));

        let msg = match result {
            Ok(result) => ThinLtoMessage::WorkItem { result: Ok(result) },

            // We ignore any `FatalError` coming out of `execute_work_item`, as a
            // diagnostic was already sent off to the main thread - just surface
            // that there was an error in this worker.
            Err(err) if err.is::<FatalErrorMarker>() => {
                ThinLtoMessage::WorkItem { result: Err(Some(WorkerFatalError)) }
            }

            Err(_) => ThinLtoMessage::WorkItem { result: Err(None) },
        };
        drop(coordinator_send.send(msg));
    };
    std::thread::Builder::new().name(name).spawn(f).expect("failed to spawn work thread");
}

enum SharedEmitterMessage {
    Diagnostic(Diagnostic),
    InlineAsmError(InlineAsmError),
    Fatal(String),
}

pub struct InlineAsmError {
    pub span: SpanData,
    pub msg: String,
    pub level: Level,
    pub source: Option<(String, Vec<InnerSpan>)>,
}

#[derive(Clone)]
pub struct SharedEmitter {
    sender: Sender<SharedEmitterMessage>,
}
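
/// The receiving half of `SharedEmitter`; the main thread drains queued
/// messages from it via `check` below.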
pub struct SharedEmitterMain {
    receiver: Receiver<SharedEmitterMessage>,
}

impl SharedEmitter {
    fn new() -> (SharedEmitter, SharedEmitterMain) {
        let (sender, receiver) = channel();

        (SharedEmitter { sender }, SharedEmitterMain { receiver })
    }

    pub fn inline_asm_error(&self, err: InlineAsmError) {
        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
    }

    fn fatal(&self, msg: &str) {
        drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
    }
}

impl Emitter for SharedEmitter {
    fn emit_diagnostic(&mut self, mut diag: rustc_errors::DiagInner) {
        // Check that we aren't missing anything interesting when converting to
        // the cut-down local `DiagInner`.
        assert!(!diag.span.has_span_labels());
        assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
        assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
        assert_eq!(diag.is_lint, None);
        // No sensible check for `diag.emitted_at`.

        let args = mem::replace(&mut diag.args, DiagArgMap::default());
        drop(
            self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::<Vec<_>>(),
                level: diag.level(),
                messages: diag.messages,
                code: diag.code,
                children: diag
                    .children
                    .into_iter()
                    .map(|child| Subdiagnostic { level: child.level, messages: child.messages })
                    .collect(),
                args,
            })),
        );
    }

    fn source_map(&self) -> Option<&SourceMap> {
        None
    }
}

impl SharedEmitterMain {
    fn check(&self, sess: &Session, blocking: bool) {
        loop {
            let message = if blocking {
                match self.receiver.recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            } else {
                match self.receiver.try_recv() {
                    Ok(message) => Ok(message),
                    Err(_) => Err(()),
                }
            };

            match message {
                Ok(SharedEmitterMessage::Diagnostic(diag)) => {
                    // The diagnostic has been received on the main thread.
                    // Convert it back to a full `Diagnostic` and emit.
                    let dcx = sess.dcx();
                    let mut d =
                        rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
                    d.span = MultiSpan::from_spans(
                        diag.span.into_iter().map(|span| span.span()).collect(),
                    );
                    d.code = diag.code; // may be `None`, that's ok
                    d.children = diag
                        .children
                        .into_iter()
                        .map(|sub| rustc_errors::Subdiag {
                            level: sub.level,
                            messages: sub.messages,
                            span: MultiSpan::new(),
                        })
                        .collect();
                    d.args = diag.args;
                    dcx.emit_diagnostic(d);
                    sess.dcx().abort_if_errors();
                }
                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
                    if !inner.span.is_dummy() {
                        err.span(inner.span.span());
                    }

                    // Point to the generated assembly if it is available.
                    if let Some((buffer, spans)) = inner.source {
                        let source = sess
                            .source_map()
                            .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
                        let spans: Vec<_> = spans
                            .iter()
                            .map(|sp| {
                                Span::with_root_ctxt(
                                    source.normalized_byte_pos(sp.start as u32),
                                    source.normalized_byte_pos(sp.end as u32),
                                )
                            })
                            .collect();
                        err.span_note(spans, "instantiated into assembly here");
                    }

                    err.emit();
                }
                Ok(SharedEmitterMessage::Fatal(msg)) => {
                    sess.dcx().fatal(msg);
                }
                Err(_) => {
                    break;
                }
            }
        }
    }
}

pub struct Coordinator<B: WriteBackendMethods> {
    sender: Sender<Message<B>>,
    future: Option<thread::JoinHandle<Result<MaybeLtoModules<B>, ()>>>,
    // Only used for the Message type.
    phantom: PhantomData<B>,
}

impl<B: WriteBackendMethods> Coordinator<B> {
    fn join(mut self) -> std::thread::Result<Result<MaybeLtoModules<B>, ()>> {
        self.future.take().unwrap().join()
    }
}

impl<B: WriteBackendMethods> Drop for Coordinator<B> {
    fn drop(&mut self) {
        if let Some(future) = self.future.take() {
            // If we haven't joined yet, signal to the coordinator that it should spawn no more
            // work, and wait for worker threads to finish.
            drop(self.sender.send(Message::CodegenAborted::<B>));
            drop(future.join());
        }
    }
}

pub struct OngoingCodegen<B: WriteBackendMethods> {
    backend: B,
    output_filenames: Arc<OutputFilenames>,
    // The field order below matters: the coordinator thread must terminate
    // before the two fields below it drop, as dropping them would prematurely
    // close channels used by the coordinator thread. See `Coordinator`'s
    // `Drop` implementation for more info.
    pub(crate) coordinator: Coordinator<B>,
    codegen_worker_receive: Receiver<CguMessage>,
    shared_emitter_main: SharedEmitterMain,
}

impl<B: WriteBackendMethods> OngoingCodegen<B> {
    pub fn join(self, sess: &Session) -> (CompiledModules, FxIndexMap<WorkProductId, WorkProduct>) {
        self.shared_emitter_main.check(sess, true);

        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
            Ok(Err(())) => {
                sess.dcx().abort_if_errors();
                panic!("expected abort due to worker thread errors")
            }
            Err(_) => {
                bug!("panic during codegen/LLVM phase");
            }
        });

        sess.dcx().abort_if_errors();

        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();

        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
            MaybeLtoModules::NoLto(compiled_modules) => {
                drop(shared_emitter);
                compiled_modules
            }
            MaybeLtoModules::FatLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_fat_lto,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: vec![do_fat_lto(
                        sess,
                        &cgcx,
                        shared_emitter,
                        tm_factory,
                        &exported_symbols_for_lto,
                        &each_linked_rlib_file_for_lto,
                        needs_fat_lto,
                    )],
                    allocator_module: None,
                }
            }
            MaybeLtoModules::ThinLto {
                cgcx,
                exported_symbols_for_lto,
                each_linked_rlib_file_for_lto,
                needs_thin_lto,
            } => {
                let tm_factory = self.backend.target_machine_factory(
                    sess,
                    cgcx.opt_level,
                    &cgcx.backend_features,
                );

                CompiledModules {
                    modules: do_thin_lto::<B>(
                        &cgcx,
                        &sess.prof,
                        shared_emitter,
                        tm_factory,
                        exported_symbols_for_lto,
                        each_linked_rlib_file_for_lto,
                        needs_thin_lto,
                    ),
                    allocator_module: None,
                }
            }
        });

        shared_emitter_main.check(sess, true);

        sess.dcx().abort_if_errors();

        let mut compiled_modules =
            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");

        // Regardless of what order these modules completed in, report them to
        // the backend in the same order every time to ensure that we're handing
        // out deterministic results.
        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));

        let work_products =
            copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
        produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);

        (compiled_modules, work_products)
    }

    pub(crate) fn codegen_finished(&self, tcx: TyCtxt<'_>) {
        self.wait_for_signal_to_codegen_item();
        self.check_for_errors(tcx.sess);
        drop(self.coordinator.sender.send(Message::CodegenComplete::<B>));
    }

    pub(crate) fn check_for_errors(&self, sess: &Session) {
        self.shared_emitter_main.check(sess, false);
    }

    pub(crate) fn wait_for_signal_to_codegen_item(&self) {
        match self.codegen_worker_receive.recv() {
            Ok(CguMessage) => {
                // Ok to proceed.
            }
            Err(_) => {
                // One of the LLVM threads must have panicked, fall through so
                // error handling can be reached.
            }
        }
    }
}

pub(crate) fn submit_codegened_module_to_llvm<B: WriteBackendMethods>(
    coordinator: &Coordinator<B>,
    module: ModuleCodegen<B::Module>,
    cost: u64,
) {
    let llvm_work_item = WorkItem::Optimize(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost }));
}

pub(crate) fn submit_post_lto_module_to_llvm<B: WriteBackendMethods>(
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
    drop(coordinator.sender.send(Message::CodegenDone::<B> { llvm_work_item, cost: 0 }));
}

pub(crate) fn submit_pre_lto_module_to_llvm<B: WriteBackendMethods>(
    tcx: TyCtxt<'_>,
    coordinator: &Coordinator<B>,
    module: CachedModuleCodegen,
) {
    let filename = pre_lto_bitcode_filename(&module.name);
    let bitcode_path = in_incr_comp_dir_sess(tcx.sess, &filename);
    // Schedule the module to be loaded
    drop(
        coordinator
            .sender
            .send(Message::AddImportOnlyModule::<B> { bitcode_path, work_product: module.source }),
    );
}
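
/// For example, a module named `foo.0` maps to the filename `foo.0.pre-lto.bc`
/// via the `PRE_LTO_BC_EXT` extension.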
fn pre_lto_bitcode_filename(module_name: &str) -> String {
    format!("{module_name}.{PRE_LTO_BC_EXT}")
}

fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
    // This should never be true (because it's not supported). If it is true,
    // something is wrong with command-line arg validation.
    assert!(
        !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
            && tcx.sess.target.is_like_windows
            && tcx.sess.opts.cg.prefer_dynamic)
    );

    // We need to generate the `_imp__` symbols if we are generating an rlib or we include one
    // indirectly from ThinLTO. In theory these are not needed as ThinLTO could resolve
    // these, but it currently does not do so.
    let can_have_static_objects =
        tcx.sess.lto() == Lto::Thin || tcx.crate_types().contains(&CrateType::Rlib);

    tcx.sess.target.is_like_windows &&
        can_have_static_objects &&
        // ThinLTO can't handle this workaround in all cases, so we don't
        // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
        // dynamic linking when linker plugin LTO is enabled.
        !tcx.sess.opts.cg.linker_plugin_lto.enabled()
}