
rustc_middle/dep_graph/graph.rs

1use std::assert_matches::assert_matches;
2use std::fmt::Debug;
3use std::hash::Hash;
4use std::sync::Arc;
5use std::sync::atomic::{AtomicU32, Ordering};
6
7use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
8use rustc_data_structures::fx::FxHashSet;
9use rustc_data_structures::profiling::QueryInvocationId;
10use rustc_data_structures::sharded::{self, ShardedHashMap};
11use rustc_data_structures::stable_hasher::{StableHash, StableHasher};
12use rustc_data_structures::sync::{AtomicU64, Lock};
13use rustc_data_structures::unord::UnordMap;
14use rustc_errors::DiagInner;
15use rustc_index::IndexVec;
16use rustc_macros::{Decodable, Encodable};
17use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
18use rustc_session::Session;
19use rustc_span::Symbol;
20use tracing::instrument;
21#[cfg(debug_assertions)]
22use {super::debug::EdgeFilter, std::env};
23
24use super::retained::RetainedDepGraph;
25use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
26use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps};
27use crate::dep_graph::edges::EdgesVec;
28use crate::ich::StableHashingContext;
29use crate::ty::TyCtxt;
30use crate::verify_ich::incremental_verify_ich;
31
32/// Tracks 'side effects' for a particular query.
33/// This struct is saved to disk along with the query result,
34/// and loaded from disk if we mark the query as green.
35/// This allows us to 'replay' changes to global state
36/// that would otherwise only occur if we actually
37/// executed the query method.
38///
39/// Each side effect gets a unique dep node index which is added
40/// as a dependency of the query which had the effect.
41#[derive(Debug, Encodable, Decodable)]
42pub enum QuerySideEffect {
43    /// Stores a diagnostic emitted during query execution.
44    /// This diagnostic will be re-emitted if we mark
45    /// the query as green, as that query will have the side
46    /// effect dep node as a dependency.
47    Diagnostic(DiagInner),
48    /// Records the feature used during query execution.
49    /// This feature will be inserted into `sess.used_features`
50    /// if we mark the query as green, as that query will have
51    /// the side effect dep node as a dependency.
52    CheckFeature { symbol: Symbol },
53}
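// Editor's note: an illustrative, self-contained sketch of the replay idea described
// above, not rustc code. Side effects observed while a query runs are stored keyed by
// the dep node index created for them; if that node is later marked green without
// re-running the query, the stored effect is replayed. All names and types below are
// hypothetical stand-ins.
mod side_effect_replay_sketch {
    use std::collections::BTreeMap;

    pub struct SideEffectStore {
        by_node: BTreeMap<u32, String>, // node index -> recorded diagnostic text
    }

    impl SideEffectStore {
        pub fn new() -> Self {
            SideEffectStore { by_node: BTreeMap::new() }
        }

        // Record a side effect under the node index allocated for it.
        pub fn record(&mut self, node_index: u32, diagnostic: String) {
            self.by_node.insert(node_index, diagnostic);
        }

        // If the node was marked green (the query was not re-run), replay the effect.
        pub fn replay_if_green(&self, node_index: u32, is_green: bool, emit: impl Fn(&str)) {
            if is_green {
                if let Some(diagnostic) = self.by_node.get(&node_index) {
                    emit(diagnostic);
                }
            }
        }
    }
}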
54
55#[derive(Clone)]
56pub struct DepGraph {
57    data: Option<Arc<DepGraphData>>,
58
59    /// This field is used for assigning DepNodeIndices when running in
60    /// non-incremental mode. Even in non-incremental mode we make sure that
61    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
62    /// ID is used for self-profiling.
63    virtual_dep_node_index: Arc<AtomicU32>,
64}
65
66rustc_index::newtype_index! {
67    pub struct DepNodeIndex {}
68}
69
70// We store a large collection of these in `prev_index_to_index` during
71// non-full incremental builds, and want to ensure that the element size
72// doesn't inadvertently increase.
73rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);
74
75impl DepNodeIndex {
76    const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
77    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
78}
79
80impl From<DepNodeIndex> for QueryInvocationId {
81    #[inline(always)]
82    fn from(dep_node_index: DepNodeIndex) -> Self {
83        QueryInvocationId(dep_node_index.as_u32())
84    }
85}
86
87pub(crate) struct MarkFrame<'a> {
88    index: SerializedDepNodeIndex,
89    parent: Option<&'a MarkFrame<'a>>,
90}
91
92#[derive(Debug)]
93pub(super) enum DepNodeColor {
94    Green(DepNodeIndex),
95    Red,
96    Unknown,
97}
98
99pub struct DepGraphData {
100    /// The new encoding of the dependency graph, optimized for red/green
101    /// tracking. The `current` field is the dependency graph of only the
102    /// current compilation session: We don't merge the previous dep-graph into
103    /// current one anymore, but we do reference shared data to save space.
104    current: CurrentDepGraph,
105
106    /// The dep-graph from the previous compilation session. It contains all
107    /// nodes and edges as well as all fingerprints of nodes that have them.
108    previous: Arc<SerializedDepGraph>,
109
110    colors: DepNodeColorMap,
111
112    /// When we load, there may be `.o` files, cached MIR, or other such
113    /// things available to us. If we find that they are not dirty, we
114    /// load the path to the file storing those work-products here into
115    /// this map. We can later look for and extract that data.
116    previous_work_products: WorkProductMap,
117
118    /// Used by incremental compilation tests to assert that
119    /// a particular query result was decoded from disk
120    /// (not just marked green)
121    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
122}
123
124pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
125where
126    R: StableHash,
127{
128    let mut stable_hasher = StableHasher::new();
129    result.stable_hash(hcx, &mut stable_hasher);
130    stable_hasher.finish()
131}
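// Editor's note: an illustrative, self-contained sketch of the idea in `hash_result`
// above, not rustc code. It hashes a query result into a single fingerprint-like value;
// std's DefaultHasher stands in for `StableHasher`, which in rustc hashes through a
// `StableHashingContext` so the value is stable across compilation sessions.
fn hash_result_sketch<R: std::hash::Hash>(result: &R) -> u64 {
    use std::hash::{Hash, Hasher};
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    result.hash(&mut hasher);
    hasher.finish()
}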
132
133impl DepGraph {
134    pub fn new(
135        session: &Session,
136        prev_graph: Arc<SerializedDepGraph>,
137        prev_work_products: WorkProductMap,
138        encoder: FileEncoder,
139    ) -> DepGraph {
140        let prev_graph_node_count = prev_graph.node_count();
141
142        let current =
143            CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));
144
145        let colors = DepNodeColorMap::new(prev_graph_node_count);
146
147        // Instantiate a node with zero dependencies only once for anonymous queries.
148        let _green_node_index = current.alloc_new_node(
149            DepNode { kind: DepKind::AnonZeroDeps, key_fingerprint: current.anon_id_seed.into() },
150            EdgesVec::new(),
151            Fingerprint::ZERO,
152        );
153        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);
154
155        // Create a single always-red node, with no dependencies of its own.
156        // Other nodes can use the always-red node as a fake dependency, to
157        // ensure that their dependency list will never be all-green.
158        let red_node_index = current.alloc_new_node(
159            DepNode { kind: DepKind::Red, key_fingerprint: Fingerprint::ZERO.into() },
160            EdgesVec::new(),
161            Fingerprint::ZERO,
162        );
163        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
164        if prev_graph_node_count > 0 {
165            let prev_index =
166                const { SerializedDepNodeIndex::from_u32(DepNodeIndex::FOREVER_RED_NODE.as_u32()) };
167            let result = colors.try_set_color(prev_index, DesiredColor::Red);
168            assert_matches!(result, TrySetColorResult::Success);
169        }
170
171        DepGraph {
172            data: Some(Arc::new(DepGraphData {
173                previous_work_products: prev_work_products,
174                current,
175                previous: prev_graph,
176                colors,
177                debug_loaded_from_disk: Default::default(),
178            })),
179            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
180        }
181    }
182
183    pub fn new_disabled() -> DepGraph {
184        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
185    }
186
187    #[inline]
188    pub fn data(&self) -> Option<&DepGraphData> {
189        self.data.as_deref()
190    }
191
192    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
193    #[inline]
194    pub fn is_fully_enabled(&self) -> bool {
195        self.data.is_some()
196    }
197
198    pub fn with_retained_dep_graph(&self, f: impl Fn(&RetainedDepGraph)) {
199        if let Some(data) = &self.data {
200            data.current.encoder.with_retained_dep_graph(f)
201        }
202    }
203
204    pub fn assert_ignored(&self) {
205        if let Some(..) = self.data {
206            read_deps(|task_deps| {
207                assert_matches!(
208                    task_deps,
209                    TaskDepsRef::Ignore,
210                    "expected no task dependency tracking"
211                );
212            })
213        }
214    }
215
216    pub fn with_ignore<OP, R>(&self, op: OP) -> R
217    where
218        OP: FnOnce() -> R,
219    {
220        with_deps(TaskDepsRef::Ignore, op)
221    }
222
223    /// Used to wrap the deserialization of a query result from disk.
224    /// This method enforces that no new `DepNodes` are created during
225    /// query result deserialization.
226    ///
227    /// Enforcing this makes the query dep graph simpler - all nodes
228    /// must be created during the query execution, and should be
229    /// created from inside the 'body' of a query (the implementation
230    /// provided by a particular compiler crate).
231    ///
232    /// Consider the case of three queries `A`, `B`, and `C`, where
233    /// `A` invokes `B` and `B` invokes `C`:
234    ///
235    /// `A -> B -> C`
236    ///
237    /// Suppose that decoding the result of query `B` required re-computing
238    /// the query `C`. If we did not create a fresh `TaskDeps` when
239    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
240    /// (if we needed to re-execute `A`). This would cause us to create
241    /// a new edge `A -> C`. If this edge did not previously
242    /// exist in the `DepGraph`, then we could end up with a different
243    /// `DepGraph` at the end of compilation, even if there were no
244    /// meaningful changes to the overall program (e.g. a newline was added).
245    /// In addition, this edge might cause a subsequent compilation run
246    /// to try to force `C` before marking other necessary nodes green. If
247    /// `C` did not exist in the new compilation session, then we could
248    /// get an ICE. Normally, we would have tried (and failed) to mark
249    /// some other query green (e.g. `item_children`) which was used
250    /// to obtain `C`, which would prevent us from ever trying to force
251    /// a nonexistent `C`.
252    ///
253    /// It might be possible to enforce that all `DepNode`s read during
254    /// deserialization already exist in the previous `DepGraph`. In
255    /// the above example, we would invoke `C` during the deserialization
256    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
257    /// of `B`, this would result in an edge `B -> C`. If that edge already
258    /// existed (with the same `DepPathHash`es), then it should be correct
259    /// to allow the invocation of the query to proceed during deserialization
260    /// of a query result. We would merely assert that the dep-graph fragment
261    /// that would have been added by invoking `C` while decoding `B`
262    /// is equivalent to the dep-graph fragment that we already instantiated for B
263    /// (at the point where we successfully marked B as green).
264    ///
265    /// However, this would require additional complexity
266    /// in the query infrastructure, and is not currently needed by the
267    /// decoding of any query results. Should the need arise in the future,
268    /// we should consider extending the query system with this functionality.
269    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
270    where
271        OP: FnOnce() -> R,
272    {
273        with_deps(TaskDepsRef::Forbid, op)
274    }
275
276    #[inline(always)]
277    pub fn with_task<'tcx, OP, R>(
278        &self,
279        dep_node: DepNode,
280        tcx: TyCtxt<'tcx>,
281        op: OP,
282        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
283    ) -> (R, DepNodeIndex)
284    where
285        OP: FnOnce() -> R,
286    {
287        match self.data() {
288            Some(data) => data.with_task(dep_node, tcx, op, hash_result),
289            None => (op(), self.next_virtual_depnode_index()),
290        }
291    }
292
293    pub fn with_anon_task<'tcx, OP, R>(
294        &self,
295        tcx: TyCtxt<'tcx>,
296        dep_kind: DepKind,
297        op: OP,
298    ) -> (R, DepNodeIndex)
299    where
300        OP: FnOnce() -> R,
301    {
302        match self.data() {
303            Some(data) => {
304                let (result, index) = data.with_anon_task_inner(tcx, dep_kind, op);
305                self.read_index(index);
306                (result, index)
307            }
308            None => (op(), self.next_virtual_depnode_index()),
309        }
310    }
311}
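// Editor's note: an illustrative, self-contained sketch of the shape of
// `with_task`/`with_anon_task` above, not rustc code. A closure is run while the
// dependency indices it reads are collected, and a new node is then allocated whose
// edges are exactly those reads. The `RefCell` plumbing is a hypothetical stand-in for
// rustc's thread-local `TaskDepsRef` mechanism.
use std::cell::RefCell;

fn with_task_sketch<R>(
    graph: &RefCell<Vec<Vec<u32>>>,           // node index -> edge list
    active_reads: &RefCell<Option<Vec<u32>>>, // reads recorded for the task being run
    op: impl FnOnce() -> R,
) -> (R, u32) {
    // Install a fresh read list, run the task, then restore the previous one.
    let previous = active_reads.replace(Some(Vec::new()));
    let result = op();
    let reads = active_reads.replace(previous).expect("read list was installed above");

    // Allocate a node whose edges are the reads performed by `op`.
    let mut graph = graph.borrow_mut();
    let index = graph.len() as u32;
    graph.push(reads);
    (result, index)
}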
312
313impl DepGraphData {
314    #[inline(always)]
315    pub fn with_task<'tcx, OP, R>(
316        &self,
317        dep_node: DepNode,
318        tcx: TyCtxt<'tcx>,
319        op: OP,
320        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
321    ) -> (R, DepNodeIndex)
322    where
323        OP: FnOnce() -> R,
324    {
325        // If the following assertion triggers, it can have two reasons:
326        // 1. Something is wrong with DepNode creation, either here or
327        //    in `DepGraph::try_mark_green()`.
328        // 2. Two distinct query keys get mapped to the same `DepNode`
329        //    (see for example #48923).
330        self.assert_dep_node_not_yet_allocated_in_current_session(tcx.sess, &dep_node, || {
331            format!("forcing query with already existing `DepNode`: {dep_node:?}")
332        });
333
334        let with_deps = |task_deps| with_deps(task_deps, op);
335        let (result, edges) = if tcx.is_eval_always(dep_node.kind) {
336            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
337        } else {
338            let task_deps = Lock::new(TaskDeps::new(
339                #[cfg(debug_assertions)]
340                Some(dep_node),
341                0,
342            ));
343            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
344        };
345
346        let dep_node_index =
347            self.hash_result_and_alloc_node(tcx, dep_node, edges, &result, hash_result);
348
349        (result, dep_node_index)
350    }
351
352    /// Executes something within an "anonymous" task, that is, a task the
353    /// `DepNode` of which is determined by the list of inputs it read from.
354    ///
355    /// NOTE: this does not actually count as a read of the DepNode here.
356    /// Using the result of this task without reading the DepNode will result
357    /// in untracked dependencies which may lead to ICEs as nodes are
358    /// incorrectly marked green.
359    ///
360    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
361    /// user of this function actually performs the read.
362    fn with_anon_task_inner<'tcx, OP, R>(
363        &self,
364        tcx: TyCtxt<'tcx>,
365        dep_kind: DepKind,
366        op: OP,
367    ) -> (R, DepNodeIndex)
368    where
369        OP: FnOnce() -> R,
370    {
371        debug_assert!(!tcx.is_eval_always(dep_kind));
372
373        // Large numbers of reads are common enough here that pre-sizing `read_set`
374        // to 128 actually helps perf on some benchmarks.
375        let task_deps = Lock::new(TaskDeps::new(
376            #[cfg(debug_assertions)]
377            None,
378            128,
379        ));
380        let result = with_deps(TaskDepsRef::Allow(&task_deps), op);
381        let task_deps = task_deps.into_inner();
382        let reads = task_deps.reads;
383
384        let dep_node_index = match reads.len() {
385            0 => {
386                // Because the dep-node id of anon nodes is computed from the set of its
387                // dependencies, we already know what the ID of this dependency-less node is
388                // going to be (i.e. equal to the precomputed
389                // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
390                // a `StableHasher` and sending the node through interning.
391                DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
392            }
393            1 => {
394                // When there is only one dependency, don't bother creating a node.
395                reads[0]
396            }
397            _ => {
398                // The dep node indices are hashed here instead of hashing the dep nodes of the
399                // dependencies. These indices may refer to different nodes per session, but this isn't
400                // a problem here because we ensure that the final dep node hash is per-session only by
401                // combining it with the per-session random number `anon_id_seed`. This hash only needs
402                // to map the dependencies to a single value on a per session basis.
403                let mut hasher = StableHasher::new();
404                reads.hash(&mut hasher);
405
406                let target_dep_node = DepNode {
407                    kind: dep_kind,
408                    // Fingerprint::combine() is faster than sending Fingerprint
409                    // through the StableHasher (at least as long as StableHasher
410                    // is so slow).
411                    key_fingerprint: self.current.anon_id_seed.combine(hasher.finish()).into(),
412                };
413
414                // The DepNodes generated by the process above are not unique. 2 queries could
415                // have exactly the same dependencies. However, deserialization does not handle
416                // duplicated nodes, so we do the deduplication here directly.
417                //
418                // As anonymous nodes are a small quantity compared to the full dep-graph, the
419                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
420                // us avoid useless growth of the graph with almost-equivalent nodes.
421                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
422                    self.current.alloc_new_node(target_dep_node, reads, Fingerprint::ZERO)
423                })
424            }
425        };
426
427        (result, dep_node_index)
428    }
429
430    /// Intern the new `DepNode` with the dependencies up-to-now.
431    fn hash_result_and_alloc_node<'tcx, R>(
432        &self,
433        tcx: TyCtxt<'tcx>,
434        node: DepNode,
435        edges: EdgesVec,
436        result: &R,
437        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
438    ) -> DepNodeIndex {
439        let hashing_timer = tcx.prof.incr_result_hashing();
440        let current_fingerprint = hash_result.map(|hash_result| {
441            tcx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
442        });
443        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
444        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
445        dep_node_index
446    }
447}
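// Editor's note: an illustrative, self-contained sketch of how anonymous nodes are
// keyed and deduplicated in `with_anon_task_inner` above, not rustc code. The node
// identity is derived from its dependency list combined with a per-session seed, and
// identical dependency lists map to the same node index. `DefaultHasher` and the `u64`
// key are hypothetical stand-ins for `StableHasher` and `Fingerprint`.
use std::collections::HashMap;

fn anon_node_key(session_seed: u64, reads: &[u32]) -> u64 {
    use std::hash::{Hash, Hasher};
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    session_seed.hash(&mut hasher);
    reads.hash(&mut hasher);
    hasher.finish()
}

fn intern_anon_node(
    anon_node_to_index: &mut HashMap<u64, u32>,
    next_index: &mut u32,
    key: u64,
) -> u32 {
    // Reuse the existing node for an identical dependency list, or allocate a new one.
    *anon_node_to_index.entry(key).or_insert_with(|| {
        let index = *next_index;
        *next_index += 1;
        index
    })
}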
448
449impl DepGraph {
450    #[inline]
451    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
452        if let Some(ref data) = self.data {
453            read_deps(|task_deps| {
454                let mut task_deps = match task_deps {
455                    TaskDepsRef::Allow(deps) => deps.lock(),
456                    TaskDepsRef::EvalAlways => {
457                        // We don't need to record dependencies of eval_always
458                        // queries. They are re-evaluated unconditionally anyway.
459                        return;
460                    }
461                    TaskDepsRef::Ignore => return,
462                    TaskDepsRef::Forbid => {
463                        // Reading is forbidden in this context. ICE with a useful error message.
464                        panic_on_forbidden_read(data, dep_node_index)
465                    }
466                };
467                let task_deps = &mut *task_deps;
468
469                if cfg!(debug_assertions) {
470                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
471                }
472
473                // Has `dep_node_index` been seen before? Use either a linear scan or a hashset
474                // lookup to determine this. See `TaskDeps::read_set` for details.
475                let new_read = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
476                    !task_deps.reads.contains(&dep_node_index)
477                } else {
478                    task_deps.read_set.insert(dep_node_index)
479                };
480                if new_read {
481                    task_deps.reads.push(dep_node_index);
482                    if task_deps.reads.len() == TaskDeps::LINEAR_SCAN_MAX + 1 {
483                        // Fill `read_set` with what we have so far. Future lookups will use it.
484                        task_deps.read_set.extend(task_deps.reads.iter().copied());
485                    }
486
487                    #[cfg(debug_assertions)]
488                    {
489                        if let Some(target) = task_deps.node
490                            && let Some(ref forbidden_edge) = data.current.forbidden_edge
491                        {
492                            let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
493                            if forbidden_edge.test(&src, &target) {
494                                panic!("forbidden edge {:?} -> {:?} created", src, target)
495                            }
496                        }
497                    }
498                } else if cfg!(debug_assertions) {
499                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
500                }
501            })
502        }
503    }
504
505    /// This encodes a side effect by creating a node with a unique index and associating
506    /// the side effect with that node, for use in the next session.
507    #[inline]
508    pub fn record_diagnostic<'tcx>(&self, tcx: TyCtxt<'tcx>, diagnostic: &DiagInner) {
509        if let Some(ref data) = self.data {
510            read_deps(|task_deps| match task_deps {
511                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
512                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
513                    let dep_node_index = data
514                        .encode_side_effect(tcx, QuerySideEffect::Diagnostic(diagnostic.clone()));
515                    self.read_index(dep_node_index);
516                }
517            })
518        }
519    }
520    /// This forces a side effect node green by running its side effect. `prev_index` would
521    /// refer to a node created using `encode_side_effect` in the previous session.
522    #[inline]
523    pub fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
524        if let Some(ref data) = self.data {
525            data.force_side_effect(tcx, prev_index);
526        }
527    }
528
529    #[inline]
530    pub fn encode_side_effect<'tcx>(
531        &self,
532        tcx: TyCtxt<'tcx>,
533        side_effect: QuerySideEffect,
534    ) -> DepNodeIndex {
535        if let Some(ref data) = self.data {
536            data.encode_side_effect(tcx, side_effect)
537        } else {
538            self.next_virtual_depnode_index()
539        }
540    }
541
542    /// Create a node when we force-feed a value into the query cache.
543    /// This is used to remove cycles during type-checking const generic parameters.
544    ///
545    /// As usual in the query system, we consider that the current state of the calling query
546    /// depends only on the list of dependencies up to now. As a consequence, the value
547    /// that this query gives us can only depend on those dependencies too. Therefore,
548    /// it is sound to use the current dependency set for the created node.
549    ///
550    /// During replay, the order of the nodes is relevant in the dependency graph.
551    /// So the unchanged replay will mark the caller query before trying to mark this one.
552    /// If there is a change to report, the caller query will be re-executed before this one.
553    ///
554    /// FIXME: If the code is changed enough for this node to be marked before requiring the
555    /// caller's node, we suppose that those changes will be enough to mark this node red and
556    /// force a recomputation using the "normal" way.
557    pub fn with_feed_task<'tcx, R>(
558        &self,
559        node: DepNode,
560        tcx: TyCtxt<'tcx>,
561        result: &R,
562        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
563        format_value_fn: fn(&R) -> String,
564    ) -> DepNodeIndex {
565        if let Some(data) = self.data.as_ref() {
566            // The caller query has more dependencies than the node we are creating. We may
567            // encounter a case where this created node is marked as green, but the caller query is
568            // subsequently marked as red or recomputed. In this case, we will end up feeding a
569            // value to an existing node.
570            //
571            // For sanity, we still check that the loaded stable hash and the new one match.
572            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
573                let dep_node_index = data.colors.current(prev_index);
574                if let Some(dep_node_index) = dep_node_index {
575                    incremental_verify_ich(
576                        tcx,
577                        data,
578                        result,
579                        prev_index,
580                        hash_result,
581                        format_value_fn,
582                    );
583
584                    #[cfg(debug_assertions)]
585                    if hash_result.is_some() {
586                        data.current.record_edge(
587                            dep_node_index,
588                            node,
589                            data.prev_value_fingerprint_of(prev_index),
590                        );
591                    }
592
593                    return dep_node_index;
594                }
595            }
596
597            let mut edges = EdgesVec::new();
598            read_deps(|task_deps| match task_deps {
599                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
600                TaskDepsRef::EvalAlways => {
601                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
602                }
603                TaskDepsRef::Ignore => {}
604                TaskDepsRef::Forbid => {
605                    panic!("Cannot summarize when dependencies are not recorded.")
606                }
607            });
608
609            data.hash_result_and_alloc_node(tcx, node, edges, result, hash_result)
610        } else {
611            // Incremental compilation is turned off. We just execute the task
612            // without tracking. We still provide a dep-node index that uniquely
613            // identifies the task so that we have a cheap way of referring to
614            // the query for self-profiling.
615            self.next_virtual_depnode_index()
616        }
617    }
618}
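// Editor's note: an illustrative, self-contained sketch of the read-deduplication
// strategy in `read_index` above, not rustc code. Small read lists are checked with a
// linear scan; once a threshold is crossed, a hash set is built and used for subsequent
// membership checks. The threshold and types are hypothetical stand-ins for
// `TaskDeps::LINEAR_SCAN_MAX` and the dep node index type.
use std::collections::HashSet;

const LINEAR_SCAN_MAX_SKETCH: usize = 16;

#[derive(Default)]
struct ReadsSketch {
    reads: Vec<u32>,
    read_set: HashSet<u32>,
}

impl ReadsSketch {
    fn record_read(&mut self, index: u32) {
        // Decide whether this read is new, using a linear scan while the list is small.
        let is_new = if self.reads.len() <= LINEAR_SCAN_MAX_SKETCH {
            !self.reads.contains(&index)
        } else {
            self.read_set.insert(index)
        };
        if is_new {
            self.reads.push(index);
            if self.reads.len() == LINEAR_SCAN_MAX_SKETCH + 1 {
                // Switch over: seed the hash set with everything read so far.
                self.read_set.extend(self.reads.iter().copied());
            }
        }
    }
}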
619
620impl DepGraphData {
621    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
622        &self,
623        sess: &Session,
624        dep_node: &DepNode,
625        msg: impl FnOnce() -> S,
626    ) {
627        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
628            let color = self.colors.get(prev_index);
629            let ok = match color {
630                DepNodeColor::Unknown => true,
631                DepNodeColor::Red => false,
632                DepNodeColor::Green(..) => sess.threads() > 1, // Other threads may mark this green
633            };
634            if !ok {
635                panic!("{}", msg())
636            }
637        }
638    }
639
640    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
641        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
642            self.colors.get(prev_index)
643        } else {
644            // This is a node that did not exist in the previous compilation session.
645            DepNodeColor::Unknown
646        }
647    }
648
649    /// Returns true if the given node has been marked as green during the
650    /// current compilation session. Used in various assertions
651    #[inline]
652    pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
653        matches!(self.colors.get(prev_index), DepNodeColor::Green(_))
654    }
655
656    #[inline]
657    pub fn prev_value_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
658        self.previous.value_fingerprint_for_index(prev_index)
659    }
660
661    #[inline]
662    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> &DepNode {
663        self.previous.index_to_node(prev_index)
664    }
665
666    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
667        self.debug_loaded_from_disk.lock().insert(dep_node);
668    }
669
670    /// This encodes a side effect by creating a node with a unique index and associating
671    /// the side effect with that node, for use in the next session.
672    #[inline]
673    fn encode_side_effect<'tcx>(
674        &self,
675        tcx: TyCtxt<'tcx>,
676        side_effect: QuerySideEffect,
677    ) -> DepNodeIndex {
678        // Use `send_new` so we get a unique index, even though the dep node is not unique.
679        let dep_node_index = self.current.encoder.send_new(
680            DepNode {
681                kind: DepKind::SideEffect,
682                key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
683            },
684            Fingerprint::ZERO,
685            // We want the side effect node to always be red so it will be forced and run the
686            // side effect.
687            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
688        );
689        tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect);
690        dep_node_index
691    }
692
693    /// This forces a side effect node green by running its side effect. `prev_index` would
694    /// refer to a node created using `encode_side_effect` in the previous session.
695    #[inline]
696    fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) {
697        with_deps(TaskDepsRef::Ignore, || {
698            let side_effect = tcx
699                .query_system
700                .on_disk_cache
701                .as_ref()
702                .unwrap()
703                .load_side_effect(tcx, prev_index)
704                .unwrap();
705
706            // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
707            // green dependencies. `send_and_color` will also prevent multiple nodes
708            // being encoded for concurrent calls.
709            let dep_node_index = self.current.encoder.send_and_color(
710                prev_index,
711                &self.colors,
712                DepNode {
713                    kind: DepKind::SideEffect,
714                    key_fingerprint: PackedFingerprint::from(Fingerprint::ZERO),
715                },
716                Fingerprint::ZERO,
717                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
718                true,
719            );
720
721            match &side_effect {
722                QuerySideEffect::Diagnostic(diagnostic) => {
723                    tcx.dcx().emit_diagnostic(diagnostic.clone());
724                }
725                QuerySideEffect::CheckFeature { symbol } => {
726                    tcx.sess.used_features.lock().insert(*symbol, dep_node_index.as_u32());
727                }
728            }
729
730            // This will just overwrite the same value for concurrent calls.
731            tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect);
732        })
733    }
734
735    fn alloc_and_color_node(
736        &self,
737        key: DepNode,
738        edges: EdgesVec,
739        value_fingerprint: Option<Fingerprint>,
740    ) -> DepNodeIndex {
741        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
742            // Determine the color and index of the new `DepNode`.
743            let is_green = if let Some(value_fingerprint) = value_fingerprint {
744                if value_fingerprint == self.previous.value_fingerprint_for_index(prev_index) {
745                    // This is a green node: it existed in the previous compilation,
746                    // its query was re-executed, and it has the same result as before.
747                    true
748                } else {
749                    // This is a red node: it existed in the previous compilation, its query
750                    // was re-executed, but it has a different result from before.
751                    false
752                }
753            } else {
754                // This is a red node, effectively: it existed in the previous compilation
755                // session, its query was re-executed, but it doesn't compute a result hash
756                // (i.e. it represents a `no_hash` query), so we have no way of determining
757                // whether or not the result was the same as before.
758                false
759            };
760
761            let value_fingerprint = value_fingerprint.unwrap_or(Fingerprint::ZERO);
762
763            let dep_node_index = self.current.encoder.send_and_color(
764                prev_index,
765                &self.colors,
766                key,
767                value_fingerprint,
768                edges,
769                is_green,
770            );
771
772            #[cfg(debug_assertions)]
773            self.current.record_edge(dep_node_index, key, value_fingerprint);
774
775            dep_node_index
776        } else {
777            self.current.alloc_new_node(key, edges, value_fingerprint.unwrap_or(Fingerprint::ZERO))
778        }
779    }
780
781    fn promote_node_and_deps_to_current(
782        &self,
783        prev_index: SerializedDepNodeIndex,
784    ) -> Option<DepNodeIndex> {
785        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);
786
787        #[cfg(debug_assertions)]
788        if let Some(dep_node_index) = dep_node_index {
789            self.current.record_edge(
790                dep_node_index,
791                *self.previous.index_to_node(prev_index),
792                self.previous.value_fingerprint_for_index(prev_index),
793            );
794        }
795
796        dep_node_index
797    }
798}
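// Editor's note: an illustrative sketch of the coloring decision made in
// `alloc_and_color_node` above, not rustc code. A re-executed node that also existed in
// the previous session is green iff its new result fingerprint equals the previous one;
// queries without a result hash (`no_hash`) are conservatively treated as red. Plain
// `u64`s stand in for `Fingerprint`.
fn is_green_after_reexecution(new_fingerprint: Option<u64>, prev_fingerprint: u64) -> bool {
    matches!(new_fingerprint, Some(new) if new == prev_fingerprint)
}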
799
800impl DepGraph {
801    /// Checks whether a previous work product exists for `v` and, if
802    /// so, returns the path that leads to it. Used to skip doing work.
803    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
804        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
805    }
806
807    /// Access the map of work-products created during the cached run. Only
808    /// used during saving of the dep-graph.
809    pub fn previous_work_products(&self) -> &WorkProductMap {
810        &self.data.as_ref().unwrap().previous_work_products
811    }
812
813    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
814        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
815    }
816
817    pub fn debug_dep_kind_was_loaded_from_disk(&self, dep_kind: DepKind) -> bool {
818        // We only check if we have a dep node corresponding to the given dep kind.
819        #[allow(rustc::potential_query_instability)]
820        self.data
821            .as_ref()
822            .unwrap()
823            .debug_loaded_from_disk
824            .lock()
825            .iter()
826            .any(|node| node.kind == dep_kind)
827    }
828
829    fn node_color(&self, dep_node: &DepNode) -> DepNodeColor {
830        if let Some(ref data) = self.data {
831            return data.node_color(dep_node);
832        }
833
834        DepNodeColor::Unknown
835    }
836
837    pub fn try_mark_green<'tcx>(
838        &self,
839        tcx: TyCtxt<'tcx>,
840        dep_node: &DepNode,
841    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
842        self.data()?.try_mark_green(tcx, dep_node)
843    }
844}
845
846impl DepGraphData {
847    /// Try to obtain a node index for the node `dep_node`.
848    ///
849    /// A node will have an index when it has already been marked green, or when we can mark it
850    /// green. This function will mark the current task as a reader of the specified node if
851    /// a node index can be found for that node.
852    pub fn try_mark_green<'tcx>(
853        &self,
854        tcx: TyCtxt<'tcx>,
855        dep_node: &DepNode,
856    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
857        debug_assert!(!tcx.is_eval_always(dep_node.kind));
858
859        // Return None if the dep node didn't exist in the previous session
860        let prev_index = self.previous.node_to_index_opt(dep_node)?;
861
862        debug_assert_eq!(self.previous.index_to_node(prev_index), dep_node);
863
864        match self.colors.get(prev_index) {
865            DepNodeColor::Green(dep_node_index) => Some((prev_index, dep_node_index)),
866            DepNodeColor::Red => None,
867            DepNodeColor::Unknown => {
868                // This DepNode and the corresponding query invocation existed
869                // in the previous compilation session too, so we can try to
870                // mark it as green by recursively marking all of its
871                // dependencies green.
872                self.try_mark_previous_green(tcx, prev_index, None)
873                    .map(|dep_node_index| (prev_index, dep_node_index))
874            }
875        }
876    }
877
878    /// Try to mark a dep-node which existed in the previous compilation session as green.
879    #[instrument(skip(self, tcx, prev_dep_node_index, frame), level = "debug")]
880    fn try_mark_previous_green<'tcx>(
881        &self,
882        tcx: TyCtxt<'tcx>,
883        prev_dep_node_index: SerializedDepNodeIndex,
884        frame: Option<&MarkFrame<'_>>,
885    ) -> Option<DepNodeIndex> {
886        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
887
888        // We never try to mark eval_always nodes as green
889        debug_assert!(!tcx.is_eval_always(self.previous.index_to_node(prev_dep_node_index).kind));
890
891        for parent_dep_node_index in self.previous.edge_targets_from(prev_dep_node_index) {
892            match self.colors.get(parent_dep_node_index) {
893                // This dependency has been marked as green before, we are still ok and can
894                // continue checking the remaining dependencies.
895                DepNodeColor::Green(_) => continue,
896
897                // This dependency's result is different to the previous compilation session. We
898                // cannot mark this dep_node as green, so stop checking.
899                DepNodeColor::Red => return None,
900
901                // We still need to determine this dependency's colour.
902                DepNodeColor::Unknown => {}
903            }
904
905            let parent_dep_node = self.previous.index_to_node(parent_dep_node_index);
906
907            // If this dependency isn't eval_always, try to mark it green recursively.
908            if !tcx.is_eval_always(parent_dep_node.kind)
909                && self.try_mark_previous_green(tcx, parent_dep_node_index, Some(&frame)).is_some()
910            {
911                continue;
912            }
913
914            // We failed to mark it green, so we try to force the query.
915            if !tcx.try_force_from_dep_node(*parent_dep_node, parent_dep_node_index, &frame) {
916                return None;
917            }
918
919            match self.colors.get(parent_dep_node_index) {
920                DepNodeColor::Green(_) => continue,
921                DepNodeColor::Red => return None,
922                DepNodeColor::Unknown => {}
923            }
924
925            if tcx.dcx().has_errors_or_delayed_bugs().is_none() {
926                panic!("try_mark_previous_green() - forcing failed to set a color");
927            }
928
929            // If the query we just forced has resulted in some kind of compilation error, we
930            // cannot rely on the dep-node color having been properly updated. This means that the
931            // query system has reached an invalid state. We let the compiler continue (by
932            // returning `None`) so it can emit error messages and wind down, but rely on the fact
933            // that this invalid state will not be persisted to the incremental compilation cache
934            // because of compilation errors being present.
935            return None;
936        }
937
938        // If we got here without hitting a `return` that means that all
939        // dependencies of this DepNode could be marked as green. Therefore we
940        // can also mark this DepNode as green.
941
942        // There may be multiple threads trying to mark the same dep node green concurrently.
943
944        // We allocate an entry for the node in the current dependency graph and
945        // add all the appropriate edges imported from the previous graph.
946        //
947        // `no_hash` nodes may fail this promotion due to already being conservatively colored red.
948        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index)?;
949
950        // ... and finally storing a "Green" entry in the color map.
951        // Multiple threads can all write the same color here.
952
953        Some(dep_node_index)
954    }
955}
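// Editor's note: an illustrative, self-contained sketch of the recursion in
// `try_mark_previous_green` above, not rustc code. A node can be marked green only if
// every dependency is (or can be made) green; a red dependency stops the attempt. The
// "force the query and re-check its color" fallback that the real implementation
// performs for dependencies that cannot be marked green by recursion alone is omitted.
#[derive(Clone, Copy, PartialEq)]
enum ColorSketch {
    Green,
    Red,
    Unknown,
}

fn try_mark_green_sketch(node: usize, edges: &[Vec<usize>], colors: &mut [ColorSketch]) -> bool {
    for &dep in &edges[node] {
        match colors[dep] {
            ColorSketch::Green => continue,
            ColorSketch::Red => return false,
            ColorSketch::Unknown => {
                if !try_mark_green_sketch(dep, edges, colors) {
                    return false;
                }
            }
        }
    }
    // Every dependency is green, so this node can be promoted to green as well.
    colors[node] = ColorSketch::Green;
    true
}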
956
957impl DepGraph {
958    /// Returns true if the given node has been marked as red during the
959    /// current compilation session. Used in various assertions
960    pub fn is_red(&self, dep_node: &DepNode) -> bool {
961        matches!(self.node_color(dep_node), DepNodeColor::Red)
962    }
963
964    /// Returns true if the given node has been marked as green during the
965    /// current compilation session. Used in various assertions
966    pub fn is_green(&self, dep_node: &DepNode) -> bool {
967        matches!(self.node_color(dep_node), DepNodeColor::Green(_))
968    }
969
970    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
971        &self,
972        sess: &Session,
973        dep_node: &DepNode,
974        msg: impl FnOnce() -> S,
975    ) {
976        if let Some(data) = &self.data {
977            data.assert_dep_node_not_yet_allocated_in_current_session(sess, dep_node, msg)
978        }
979    }
980
981    /// This method loads all on-disk cacheable query results into memory, so
982    /// they can be written out to the new cache file again. Most query results
983    /// will already be in memory but in the case where we marked something as
984    /// green but then did not need the value, that value will never have been
985    /// loaded from disk.
986    ///
987    /// This method will only load queries that will end up in the disk cache.
988    /// Other queries will not be executed.
989    pub fn exec_cache_promotions<'tcx>(&self, tcx: TyCtxt<'tcx>) {
990        let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");
991
992        let data = self.data.as_ref().unwrap();
993        for prev_index in data.colors.values.indices() {
994            match data.colors.get(prev_index) {
995                DepNodeColor::Green(_) => {
996                    let dep_node = data.previous.index_to_node(prev_index);
997                    if let Some(promote_fn) =
998                        tcx.dep_kind_vtable(dep_node.kind).promote_from_disk_fn
999                    {
1000                        promote_fn(tcx, *dep_node)
1001                    };
1002                }
1003                DepNodeColor::Unknown | DepNodeColor::Red => {
1004                    // We can skip red nodes because a node can only be marked
1005                    // as red if the query result was recomputed and thus is
1006                    // already in memory.
1007                }
1008            }
1009        }
1010    }
1011
1012    pub(crate) fn finish_encoding(&self) -> FileEncodeResult {
1013        if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
1014    }
1015
1016    pub fn next_virtual_depnode_index(&self) -> DepNodeIndex {
1017        debug_assert!(self.data.is_none());
1018        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
1019        DepNodeIndex::from_u32(index)
1020    }
1021}
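// Editor's note: an illustrative sketch of the non-incremental fallback used by
// `next_virtual_depnode_index` above, not rustc code. With the dep graph disabled,
// indices are still handed out from a shared atomic counter so that the self-profiler
// can refer to each query invocation by a unique id.
fn next_virtual_index_sketch(counter: &std::sync::atomic::AtomicU32) -> u32 {
    counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
}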
1022
1023/// A "work product" is an intermediate result that we save into the
1024/// incremental directory for later re-use. The primary example are
1025/// the object files that we save for each partition at code
1026/// generation time.
1027///
1028/// Each work product is associated with a dep-node, representing the
1029/// process that produced the work-product. If that dep-node is found
1030/// to be dirty when we load up, then we will delete the work-product
1031/// at load time. If the work-product is found to be clean, then we
1032/// will keep a record in the `previous_work_products` list.
1033///
1034/// In addition, work products have an associated hash. This hash is
1035/// an extra hash that can be used to decide if the work-product from
1036/// a previous compilation can be re-used (in addition to the dirty
1037/// edges check).
1038///
1039/// As the primary example, consider the object files we generate for
1040/// each partition. In the first run, we create partitions based on
1041/// the symbols that need to be compiled. For each partition P, we
1042/// hash the symbols in P and create a `WorkProduct` record associated
1043/// with `DepNode::CodegenUnit(P)`; the hash is computed from the set of
1044/// symbols in P.
1045///
1046/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1047/// judged to be clean (which means none of the things we read to
1048/// generate the partition were found to be dirty), it will be loaded
1049/// into previous work products. We will then regenerate the set of
1050/// symbols in the partition P and hash them (note that new symbols
1051/// may be added -- for example, new monomorphizations -- even if
1052/// nothing in P changed!). We will compare that hash against the
1053/// previous hash. If it matches up, we can reuse the object file.
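// Illustrative sketch (hypothetical helper, not an actual rustc API): the reuse
// decision described above amounts to recomputing the symbol hash for the
// partition and comparing it with the hash recorded in the previous session's
// `WorkProduct`. The function name and signature are assumptions for clarity.
//
//     fn can_reuse_object_file(prev_hash: Fingerprint, symbols: &[Symbol]) -> bool {
//         let mut hasher = StableHasher::new();
//         for sym in symbols {
//             // Hash the symbol names of the regenerated partition.
//             sym.as_str().hash(&mut hasher);
//         }
//         let new_hash: Fingerprint = hasher.finish();
//         // Reuse is only considered when `DepNode::CodegenUnit(P)` was judged
//         // clean; this hash check is an additional guard on top of that.
//         new_hash == prev_hash
//     }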
1054#[derive(Clone, Debug, Encodable, Decodable)]
1055pub struct WorkProduct {
1056    pub cgu_name: String,
1057    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
1058    /// saved file and the key is some identifier for the type of file being saved.
1059    ///
1060    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
1061    /// the object file's path, and "dwo" to the dwarf object file's path.
1062    pub saved_files: UnordMap<String, String>,
1063}
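// Illustrative example of the shape of a `WorkProduct` (the CGU and file names
// below are made up): following the convention on `saved_files`, the "o" key
// maps to the saved object file for the codegen unit.
//
//     let wp = WorkProduct {
//         cgu_name: "regex.f10ba03eb5ec7975-cgu.0".to_string(),
//         saved_files: UnordMap::from_iter([
//             ("o".to_string(), "regex.f10ba03eb5ec7975-cgu.0.rcgu.o".to_string()),
//         ]),
//     };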
1064
1065pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
1066
1067// Index type for `DepNodeData`'s edges.
1068rustc_index::newtype_index! {
1069    struct EdgeIndex {}
1070}
1071
1072/// `CurrentDepGraph` stores the dependency graph for the current session. It
1073/// will be populated as we run queries or tasks. We never remove nodes from the
1074/// graph: they are only added.
1075///
1076/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
1077/// in memory. This is important, because these graph structures are some of the
1078/// largest in the compiler.
1079///
1080/// For this reason, we avoid storing `DepNode`s more than once as map
1081/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
1082/// graph, and we map nodes in the previous graph to indices via a two-step
1083/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
1084/// and the `prev_index_to_index` vector (which is more compact and faster than
1085/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
1086///
1087/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
1088/// and `prev_index_to_index` fields are locked separately. Operations that take
1089/// a `DepNodeIndex` typically just access the `data` field.
1090///
1091/// We only need to manipulate at most two locks simultaneously:
1092/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1093/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
1094/// first, and `data` second.
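// Illustrative sketch of the two-step mapping described above (the helper name
// and the exact lookup method on `SerializedDepGraph` are assumptions, not the
// real code): a previous-session `DepNode` is first resolved to its
// `SerializedDepNodeIndex`, which then indexes the dense `prev_index_to_index`
// vector to obtain the current-session `DepNodeIndex`.
//
//     fn current_index_of_prev_node(
//         previous: &SerializedDepGraph,
//         prev_index_to_index: &IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>,
//         node: &DepNode,
//     ) -> Option<DepNodeIndex> {
//         let prev_index: SerializedDepNodeIndex = previous.node_to_index_opt(node)?;
//         prev_index_to_index[prev_index]
//     }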
1095pub(super) struct CurrentDepGraph {
1096    encoder: GraphEncoder,
1097    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
1098
1099    /// This is used to verify that value fingerprints do not change between the
1100    /// creation of a node and its recomputation.
1101    #[cfg(debug_assertions)]
1102    value_fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,
1103
1104    /// Used to trap when a specific edge is added to the graph.
1105    /// This is used for debug purposes and is only active with `debug_assertions`.
1106    #[cfg(debug_assertions)]
1107    forbidden_edge: Option<EdgeFilter>,
1108
1109    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
1110    /// their edges. This has the beneficial side-effect that multiple anonymous
1111    /// nodes can be coalesced into one without changing the semantics of the
1112    /// dependency graph. However, the merging of nodes can lead to a subtle
1113    /// problem during red-green marking: The color of an anonymous node from
1114    /// the current session might "shadow" the color of the node with the same
1115    /// ID from the previous session. In order to side-step this problem, we make
1116    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
1117    /// This is implemented by mixing a session-key into the ID fingerprint of
1118    /// each anon node. The session-key is a hash of the number of previous sessions.
1119    anon_id_seed: Fingerprint,
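    // Illustrative sketch of the scheme described above (the real computation
    // lives in the anonymous-task code path, not in this struct): the
    // session-specific seed is combined with a hash of the node's edges, so
    // anon IDs allocated in different sessions cannot collide.
    //
    //     let mut hasher = StableHasher::new();
    //     edges.hash(&mut hasher);                 // the anon node's dependencies
    //     let edges_hash: Fingerprint = hasher.finish();
    //     let anon_id = anon_id_seed.combine(edges_hash);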
1120
1121    /// Simple counters used for profiling and debugging; they are only
1122    /// active with `debug_assertions`.
1123    pub(super) total_read_count: AtomicU64,
1124    pub(super) total_duplicate_read_count: AtomicU64,
1125}
1126
1127impl CurrentDepGraph {
1128    fn new(
1129        session: &Session,
1130        prev_graph_node_count: usize,
1131        encoder: FileEncoder,
1132        previous: Arc<SerializedDepGraph>,
1133    ) -> Self {
1134        let mut stable_hasher = StableHasher::new();
1135        previous.session_count().hash(&mut stable_hasher);
1136        let anon_id_seed = stable_hasher.finish();
1137
1138        #[cfg(debug_assertions)]
1139        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
1140            Ok(s) => match EdgeFilter::new(&s) {
1141                Ok(f) => Some(f),
1142                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
1143            },
1144            Err(_) => None,
1145        };
1146
1147        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
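        // Worked example (illustrative): with 1_000_000 nodes in the previous
        // graph, this estimates 102 * 1_000_000 / 100 + 200 = 1_020_200 nodes,
        // i.e. roughly 2% growth plus a small constant for tiny graphs.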
1148
1149        CurrentDepGraph {
1150            encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
1151            anon_node_to_index: ShardedHashMap::with_capacity(
1152                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
1153                new_node_count_estimate / sharded::shards(),
1154            ),
1155            anon_id_seed,
1156            #[cfg(debug_assertions)]
1157            forbidden_edge,
1158            #[cfg(debug_assertions)]
1159            value_fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
1160            total_read_count: AtomicU64::new(0),
1161            total_duplicate_read_count: AtomicU64::new(0),
1162        }
1163    }
1164
1165    #[cfg(debug_assertions)]
1166    fn record_edge(
1167        &self,
1168        dep_node_index: DepNodeIndex,
1169        key: DepNode,
1170        value_fingerprint: Fingerprint,
1171    ) {
1172        if let Some(forbidden_edge) = &self.forbidden_edge {
1173            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
1174        }
1175        let prior_value_fingerprint = *self
1176            .value_fingerprints
1177            .lock()
1178            .get_or_insert_with(dep_node_index, || value_fingerprint);
1179        assert_eq!(prior_value_fingerprint, value_fingerprint, "Unstable fingerprints for {key:?}");
1180    }
1181
1182    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
1183    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
1184    #[inline(always)]
1185    fn alloc_new_node(
1186        &self,
1187        key: DepNode,
1188        edges: EdgesVec,
1189        value_fingerprint: Fingerprint,
1190    ) -> DepNodeIndex {
1191        let dep_node_index = self.encoder.send_new(key, value_fingerprint, edges);
1192
1193        #[cfg(debug_assertions)]
1194        self.record_edge(dep_node_index, key, value_fingerprint);
1195
1196        dep_node_index
1197    }
1198}
1199
1200#[derive(Debug, Clone, Copy)]
1201pub enum TaskDepsRef<'a> {
1202    /// New dependencies can be added to the
1203    /// `TaskDeps`. This is used when executing a 'normal' query
1204    /// (no `eval_always` modifier)
1205    Allow(&'a Lock<TaskDeps>),
1206    /// This is used when executing an `eval_always` query. We don't
1207    /// need to track dependencies for a query that's always
1208    /// re-executed -- but we need to know that this is an `eval_always`
1209    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
1210    /// when directly feeding other queries.
1211    EvalAlways,
1212    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
1213    Ignore,
1214    /// Any attempt to add new dependencies will cause a panic.
1215    /// This is used when decoding a query result from disk,
1216    /// to ensure that the decoding process doesn't itself
1217    /// require the execution of any queries.
1218    Forbid,
1219}
1220
1221#[derive(Debug)]
1222pub struct TaskDeps {
1223    #[cfg(debug_assertions)]
1224    node: Option<DepNode>,
1225
1226    /// A vector of `DepNodeIndex`, basically.
1227    reads: EdgesVec,
1228
1229    /// When adding new edges to `reads` in `DepGraph::read_index` we need to determine if the edge
1230    /// has been seen before. If the number of elements in `reads` is small, we just do a linear
1231    /// scan. If the number is higher, a hashset has better perf. This field is that hashset. It's
1232    /// only used if the number of elements in `reads` exceeds `LINEAR_SCAN_MAX`.
1233    read_set: FxHashSet<DepNodeIndex>,
1234}
1235
1236impl TaskDeps {
1237    /// See `TaskDeps::read_set` above.
1238    const LINEAR_SCAN_MAX: usize = 16;
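    // Illustrative sketch of the dedup strategy described on `read_set` (this is
    // not the actual `DepGraph::read_index` code, which also has to populate the
    // set when the threshold is first crossed):
    //
    //     let is_new = if task_deps.reads.len() <= TaskDeps::LINEAR_SCAN_MAX {
    //         !task_deps.reads.contains(&dep_node_index)
    //     } else {
    //         task_deps.read_set.insert(dep_node_index)
    //     };
    //     if is_new {
    //         task_deps.reads.push(dep_node_index);
    //     }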
1239
1240    #[inline]
1241    fn new(#[cfg(debug_assertions)] node: Option<DepNode>, read_set_capacity: usize) -> Self {
1242        TaskDeps {
1243            #[cfg(debug_assertions)]
1244            node,
1245            reads: EdgesVec::new(),
1246            read_set: FxHashSet::with_capacity_and_hasher(read_set_capacity, Default::default()),
1247        }
1248    }
1249}
1250
1251// A data structure that stores a `DepNodeColor` for each previous-session node
1252// as a contiguous array, using one `u32` per entry.
1253pub(super) struct DepNodeColorMap {
1254    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
1255}
1256
1257// All values below `COMPRESSED_RED` are green.
1258const COMPRESSED_RED: u32 = u32::MAX - 1;
1259const COMPRESSED_UNKNOWN: u32 = u32::MAX;
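// Illustrative decoding of the packed `u32` representation (cf. `current` and
// `get` below); `value` is the raw atomic load:
//
//     value <= DepNodeIndex::MAX_AS_U32  => DepNodeColor::Green(DepNodeIndex::from_u32(value))
//     value == COMPRESSED_RED            => DepNodeColor::Red
//     value == COMPRESSED_UNKNOWN        => DepNodeColor::Unknown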
1260
1261impl DepNodeColorMap {
1262    fn new(size: usize) -> DepNodeColorMap {
1263        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
1264        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_UNKNOWN)).collect() }
1265    }
1266
1267    #[inline]
1268    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
1269        let value = self.values[index].load(Ordering::Relaxed);
1270        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
1271    }
1272
1273    /// Atomically sets the color of a previous-session dep node to either green
1274    /// or red, if it has not already been colored.
1275    ///
1276    /// If the node already has a color, the new color is ignored, and the
1277    /// return value indicates the existing color.
1278    #[inline(always)]
1279    pub(super) fn try_set_color(
1280        &self,
1281        prev_index: SerializedDepNodeIndex,
1282        color: DesiredColor,
1283    ) -> TrySetColorResult {
1284        match self.values[prev_index].compare_exchange(
1285            COMPRESSED_UNKNOWN,
1286            match color {
1287                DesiredColor::Red => COMPRESSED_RED,
1288                DesiredColor::Green { index } => index.as_u32(),
1289            },
1290            Ordering::Relaxed,
1291            Ordering::Relaxed,
1292        ) {
1293            Ok(_) => TrySetColorResult::Success,
1294            Err(COMPRESSED_RED) => TrySetColorResult::AlreadyRed,
1295            Err(index) => TrySetColorResult::AlreadyGreen { index: DepNodeIndex::from_u32(index) },
1296        }
1297    }
1298
1299    #[inline]
1300    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> DepNodeColor {
1301        let value = self.values[index].load(Ordering::Acquire);
1302        // Green is by far the most common case. Check for that first so we can succeed with a
1303        // single comparison.
1304        if value < COMPRESSED_RED {
1305            DepNodeColor::Green(DepNodeIndex::from_u32(value))
1306        } else if value == COMPRESSED_RED {
1307            DepNodeColor::Red
1308        } else {
1309            debug_assert_eq!(value, COMPRESSED_UNKNOWN);
1310            DepNodeColor::Unknown
1311        }
1312    }
1313}
1314
1315/// The color that [`DepNodeColorMap::try_set_color`] should try to apply to a node.
1316#[derive(Clone, Copy, Debug)]
1317pub(super) enum DesiredColor {
1318    /// Try to mark the node red.
1319    Red,
1320    /// Try to mark the node green, associating it with a current-session node index.
1321    Green { index: DepNodeIndex },
1322}
1323
1324/// Return value of [`DepNodeColorMap::try_set_color`], indicating success or failure,
1325/// and (on failure) what the existing color is.
1326#[derive(Clone, Copy, Debug)]
1327pub(super) enum TrySetColorResult {
1328    /// The [`DesiredColor`] was freshly applied to the node.
1329    Success,
1330    /// Coloring failed because the node was already marked red.
1331    AlreadyRed,
1332    /// Coloring failed because the node was already marked green,
1333    /// and corresponds to node `index` in the current-session dep graph.
1334    AlreadyGreen { index: DepNodeIndex },
1335}
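// Hypothetical caller sketch (not taken from this file) showing how the result
// of `DepNodeColorMap::try_set_color` would typically be interpreted when
// racing to mark a previous-session node green; `prev_index` and `new_index`
// are assumed to be in scope:
//
//     let green_index: Option<DepNodeIndex> =
//         match colors.try_set_color(prev_index, DesiredColor::Green { index: new_index }) {
//             // We won the race; `new_index` is now the node's current-session index.
//             TrySetColorResult::Success => Some(new_index),
//             // Another thread marked the node green first; reuse its index.
//             TrySetColorResult::AlreadyGreen { index } => Some(index),
//             // The node has already been marked red in this session.
//             TrySetColorResult::AlreadyRed => None,
//         };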
1336
1337#[inline(never)]
1338#[cold]
1339pub(crate) fn print_markframe_trace(graph: &DepGraph, frame: &MarkFrame<'_>) {
1340    let data = graph.data.as_ref().unwrap();
1341
1342    eprintln!("there was a panic while trying to force a dep node");
1343    eprintln!("try_mark_green dep node stack:");
1344
1345    let mut i = 0;
1346    let mut current = Some(frame);
1347    while let Some(frame) = current {
1348        let node = data.previous.index_to_node(frame.index);
1349        eprintln!("#{i} {node:?}");
1350        current = frame.parent;
1351        i += 1;
1352    }
1353
1354    eprintln!("end of try_mark_green dep node stack");
1355}
1356
1357#[cold]
1358#[inline(never)]
1359fn panic_on_forbidden_read(data: &DepGraphData, dep_node_index: DepNodeIndex) -> ! {
1360    // We have to do an expensive reverse-lookup of the DepNode that
1361    // corresponds to `dep_node_index`, but that's OK since we are about
1362    // to ICE anyway.
1363    let mut dep_node = None;
1364
1365    // First try to find the dep node among those that already existed in the
1366    // previous session and has been marked green
1367    for prev_index in data.colors.values.indices() {
1368        if data.colors.current(prev_index) == Some(dep_node_index) {
1369            dep_node = Some(*data.previous.index_to_node(prev_index));
1370            break;
1371        }
1372    }
1373
1374    let dep_node = dep_node.map_or_else(
1375        || format!("with index {:?}", dep_node_index),
1376        |dep_node| format!("`{:?}`", dep_node),
1377    );
1378
1379    panic!(
1380        "Error: trying to record dependency on DepNode {dep_node} in a \
1381         context that does not allow it (e.g. during query deserialization). \
1382         The most common case of recording a dependency on a DepNode `foo` is \
1383         when the corresponding query `foo` is invoked. Invoking queries is not \
1384         allowed as part of loading something from the incremental on-disk cache. \
1385         See <https://github.com/rust-lang/rust/pull/91919>."
1386    )
1387}
1388
1389impl<'tcx> TyCtxt<'tcx> {
1390    /// Returns whether this kind always requires evaluation.
1391    #[inline(always)]
1392    fn is_eval_always(self, kind: DepKind) -> bool {
1393        self.dep_kind_vtable(kind).is_eval_always
1394    }
1395}