//! The search graph is responsible for caching and cycle detection in the trait
//! solver. Making sure that caching doesn't result in soundness bugs or unstable
//! query results is very challenging and makes this one of the most-involved
//! self-contained components of the compiler.
//!
//! We added fuzzing support to test its correctness. The fuzzers used to verify
//! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
//!
//! This is just a quick overview of the general design; please check out the relevant
//! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
//! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
//! The global cache has to be completely unobservable, while the per-cycle cache may impact
//! behavior as long as the resulting behavior is still correct.
use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BTreeMap, btree_map};
use std::fmt::Debug;
use std::hash::Hash;
use std::iter;
use std::marker::PhantomData;

use derive_where::derive_where;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, StableHash};
use rustc_type_ir::data_structures::HashMap;
use tracing::{debug, instrument, trace};

mod stack;
use stack::{Stack, StackDepth, StackEntry};
mod global_cache;
use global_cache::CacheData;
pub use global_cache::GlobalCache;

/// The search graph does not use `Interner` directly so that it can be
/// fuzzed without having to stub out the rest of the interner. We don't
/// make this a supertrait of `Interner` as users of the shared type library
/// shouldn't have to care about `Input` and `Result`, which are
/// implementation details of the search graph.
pub trait Cx: Copy {
    type Input: Debug + Eq + Hash + Copy;
    type Result: Debug + Eq + Hash + Copy;
    type AmbiguityInfo: Debug + Eq + Hash + Copy;

    type DepNodeIndex;
    type Tracked<T: Debug + Clone>: Debug;
    fn mk_tracked<T: Debug + Clone>(
        self,
        data: T,
        dep_node_index: Self::DepNodeIndex,
    ) -> Self::Tracked<T>;
    fn get_tracked<T: Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T;
    fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, Self::DepNodeIndex);

    fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R;

    fn assert_evaluation_is_concurrent(&self);
}

pub trait Delegate: Sized {
    type Cx: Cx;
    /// Whether to use the provisional cache. Set to `false` by a fuzzer when
    /// validating the search graph.
    const ENABLE_PROVISIONAL_CACHE: bool;
    type ValidationScope;
    /// Returning `Some` disables the global cache for the current goal.
    ///
    /// The `ValidationScope` is used when fuzzing the search graph to track
    /// for which goals the global cache has been disabled. This is necessary
    /// as we may otherwise ignore the global cache entry for some goal `G`
    /// only to later use it, failing to detect a cycle and potentially
    /// changing the result.
    fn enter_validation_scope(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> Option<Self::ValidationScope>;

    const FIXPOINT_STEP_LIMIT: usize;

    type ProofTreeBuilder;
    fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool;

    const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize;

    fn initial_provisional_result(
        cx: Self::Cx,
        kind: PathKind,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
    fn stack_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn fixpoint_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;

    fn is_ambiguous_result(
        result: <Self::Cx as Cx>::Result,
    ) -> Option<<Self::Cx as Cx>::AmbiguityInfo>;
    fn propagate_ambiguity(
        cx: Self::Cx,
        for_input: <Self::Cx as Cx>::Input,
        ambiguity_info: <Self::Cx as Cx>::AmbiguityInfo,
    ) -> <Self::Cx as Cx>::Result;

    fn compute_goal(
        search_graph: &mut SearchGraph<Self>,
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
        inspect: &mut Self::ProofTreeBuilder,
    ) -> <Self::Cx as Cx>::Result;
}

/// In the initial iteration of a cycle, we do not yet have a provisional
/// result. In that case we return an initial provisional result depending
/// on the kind of cycle.
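///
/// E.g. inductive cycles start from `Err(NoSolution)` while coinductive cycles
/// start from a trivially successful result; see the variant docs below and
/// `Delegate::initial_provisional_result` (a summary, not a normative spec).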
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(Decodable_NoContext, Encodable_NoContext, StableHash))]
pub enum PathKind {
    /// A path consisting of only inductive/unproductive steps. Their initial
    /// provisional result is `Err(NoSolution)`. We currently treat them as
    /// `PathKind::Unknown` during coherence until we're fully confident in
    /// our approach.
    Inductive,
    /// A path which is not coinductive right now but which we may want
    /// to change to be coinductive in the future. We return an ambiguous
    /// result in this case to prevent people from relying on this.
    Unknown,
    /// A path with at least one coinductive step. Such cycles hold.
    Coinductive,
    /// A path which is treated as ambiguous. Once a path has this path kind
    /// any other segment does not change its kind.
    ///
    /// This is currently only used when fuzzing to support negative reasoning.
    /// For more details, see #143054.
    ForcedAmbiguity,
}

impl PathKind {
    /// Returns the path kind when merging `self` with `rest`.
    ///
    /// Given an inductive path `self` and a coinductive path `rest`,
    /// the path `self -> rest` would be coinductive.
    ///
    /// This operation represents an ordering and would be equivalent
    /// to `max(self, rest)`.
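    ///
    /// ```ignore (illustrative)
    /// assert_eq!(PathKind::Inductive.extend(PathKind::Coinductive), PathKind::Coinductive);
    /// assert_eq!(PathKind::Unknown.extend(PathKind::Inductive), PathKind::Unknown);
    /// ```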
    fn extend(self, rest: PathKind) -> PathKind {
        match (self, rest) {
            (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
                PathKind::ForcedAmbiguity
            }
            (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
            (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
            (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
        }
    }
}

/// The kinds of cycles a cycle head was involved in.
///
/// This is used to avoid rerunning a cycle if there's
/// just a single usage kind and the final result matches
/// its provisional result.
///
/// While it tracks the number of usages using `u32`, we only ever
/// care whether there are any. We only count them to be able to ignore
/// usages from irrelevant candidates while evaluating a goal.
///
/// This cares about how nested goals relied on a cycle head. It does
/// not care about how frequently the nested goal relied on it.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
struct HeadUsages {
    inductive: u32,
    unknown: u32,
    coinductive: u32,
    forced_ambiguity: u32,
}

impl HeadUsages {
    fn add_usage(&mut self, path: PathKind) {
        match path {
            PathKind::Inductive => self.inductive += 1,
            PathKind::Unknown => self.unknown += 1,
            PathKind::Coinductive => self.coinductive += 1,
            PathKind::ForcedAmbiguity => self.forced_ambiguity += 1,
        }
    }

    /// This adds the usages which occurred while computing a nested goal.
    ///
    /// We don't actually care about how frequently the nested goal relied
    /// on its cycle heads, only whether it did.
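    ///
    /// E.g. a nested goal which used a head inductively five times still only
    /// increments `self.inductive` by one (illustrative, matching the saturating
    /// additions below).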
    fn add_usages_from_nested(&mut self, usages: HeadUsages) {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
        self.inductive += if inductive == 0 { 0 } else { 1 };
        self.unknown += if unknown == 0 { 0 } else { 1 };
        self.coinductive += if coinductive == 0 { 0 } else { 1 };
        self.forced_ambiguity += if forced_ambiguity == 0 { 0 } else { 1 };
    }

    fn ignore_usages(&mut self, usages: HeadUsages) {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
        self.inductive = self.inductive.checked_sub(inductive).unwrap();
        self.unknown = self.unknown.checked_sub(unknown).unwrap();
        self.coinductive = self.coinductive.checked_sub(coinductive).unwrap();
        self.forced_ambiguity = self.forced_ambiguity.checked_sub(forced_ambiguity).unwrap();
    }

    fn is_empty(self) -> bool {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
        inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
    }

    fn is_single(self, path_kind: PathKind) -> bool {
        match path_kind {
            PathKind::Inductive => matches!(
                self,
                HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
            ),
            PathKind::Unknown => matches!(
                self,
                HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
            ),
            PathKind::Coinductive => matches!(
                self,
                HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
            ),
            PathKind::ForcedAmbiguity => matches!(
                self,
                HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
            ),
        }
    }
}

#[derive(Debug, Default)]
pub struct CandidateHeadUsages {
    usages: Option<Box<HashMap<StackDepth, HeadUsages>>>,
}
impl CandidateHeadUsages {
    pub fn merge_usages(&mut self, other: CandidateHeadUsages) {
        if let Some(other_usages) = other.usages {
            if let Some(ref mut self_usages) = self.usages {
                // Each head is merged independently, so the final usage counts are the same
                // regardless of hash iteration order.
                #[allow(rustc::potential_query_instability)]
                for (head_index, head) in other_usages.into_iter() {
                    let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = head;
                    let self_usages = self_usages.entry(head_index).or_default();
                    self_usages.inductive += inductive;
                    self_usages.unknown += unknown;
                    self_usages.coinductive += coinductive;
                    self_usages.forced_ambiguity += forced_ambiguity;
                }
            } else {
                self.usages = Some(other_usages);
            }
        }
    }
}

#[derive(Debug, Clone, Copy)]
struct AvailableDepth(usize);
impl AvailableDepth {
    /// Returns the remaining depth allowed for nested goals.
    ///
    /// This is generally simply one less than the current depth.
    /// However, if we encountered overflow, we significantly reduce
    /// the remaining depth of all nested goals to prevent hangs
    /// in case there is exponential blowup.
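    ///
    /// E.g. with an available depth of 8, nested goals are normally allowed a
    /// depth of 7; after overflow they only get
    /// `8 / DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW` (illustrative numbers, the
    /// actual divisor is provided by the `Delegate`).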
    fn allowed_depth_for_nested<D: Delegate>(
        root_depth: AvailableDepth,
        stack: &Stack<D::Cx>,
    ) -> Option<AvailableDepth> {
        if let Some(last) = stack.last() {
            if last.available_depth.0 == 0 {
                return None;
            }

            Some(if last.encountered_overflow {
                AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW)
            } else {
                AvailableDepth(last.available_depth.0 - 1)
            })
        } else {
            Some(root_depth)
        }
    }

    /// Whether we're allowed to use a global cache entry which required
    /// the given depth.
    fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
        self.0 >= additional_depth
    }
}

#[derive(Clone, Copy, Debug)]
struct CycleHead {
    paths_to_head: PathsToNested,
    /// If the `usages` are empty, the result of that head does not matter
    /// for the current goal. However, we still don't completely drop this
    /// cycle head as whether or not it exists impacts which queries we
    /// access, so ignoring it would cause incremental compilation verification
    /// failures or hide query cycles.
    usages: HeadUsages,
}

/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We also track all paths from this goal to that head. This is necessary
/// when rebasing provisional cache results.
#[derive(Clone, Debug, Default)]
struct CycleHeads {
    heads: BTreeMap<StackDepth, CycleHead>,
}

impl CycleHeads {
    fn is_empty(&self) -> bool {
        self.heads.is_empty()
    }

    fn highest_cycle_head(&self) -> (StackDepth, CycleHead) {
        self.heads.last_key_value().map(|(k, v)| (*k, *v)).unwrap()
    }

    fn highest_cycle_head_index(&self) -> StackDepth {
        self.opt_highest_cycle_head_index().unwrap()
    }

    fn opt_highest_cycle_head_index(&self) -> Option<StackDepth> {
        self.heads.last_key_value().map(|(k, _)| *k)
    }

    fn opt_lowest_cycle_head_index(&self) -> Option<StackDepth> {
        self.heads.first_key_value().map(|(k, _)| *k)
    }

    fn remove_highest_cycle_head(&mut self) -> CycleHead {
        let last = self.heads.pop_last();
        last.unwrap().1
    }

    fn insert(
        &mut self,
        head_index: StackDepth,
        path_from_entry: impl Into<PathsToNested> + Copy,
        usages: HeadUsages,
    ) {
        match self.heads.entry(head_index) {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usages });
            }
            btree_map::Entry::Occupied(entry) => {
                let head = entry.into_mut();
                head.paths_to_head |= path_from_entry.into();
                head.usages.add_usages_from_nested(usages);
            }
        }
    }

    fn ignore_usages(&mut self, head_index: StackDepth, usages: HeadUsages) {
        self.heads.get_mut(&head_index).unwrap().usages.ignore_usages(usages)
    }

    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
        self.heads.iter().map(|(k, v)| (*k, *v))
    }
}

bitflags::bitflags! {
    /// Tracks how nested goals have been accessed. This is necessary to disable
    /// global cache entries if computing them would otherwise result in a cycle or
    /// access a provisional cache entry.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct PathsToNested: u8 {
        /// The initial value when adding a goal to its own nested goals.
        const EMPTY = 1 << 0;
        const INDUCTIVE = 1 << 1;
        const UNKNOWN = 1 << 2;
        const COINDUCTIVE = 1 << 3;
        const FORCED_AMBIGUITY = 1 << 4;
    }
}
impl From<PathKind> for PathsToNested {
    fn from(path: PathKind) -> PathsToNested {
        match path {
            PathKind::Inductive => PathsToNested::INDUCTIVE,
            PathKind::Unknown => PathsToNested::UNKNOWN,
            PathKind::Coinductive => PathsToNested::COINDUCTIVE,
            PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
        }
    }
}
impl PathsToNested {
    /// The implementation of this function is kind of ugly. We check whether
    /// there currently exist 'weaker' paths in the set; if so, we upgrade these
    /// paths to at least `path`.
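    ///
    /// ```ignore (illustrative)
    /// let paths = PathsToNested::EMPTY | PathsToNested::COINDUCTIVE;
    /// // The weaker `EMPTY` path is upgraded, the stronger `COINDUCTIVE` one is kept.
    /// assert_eq!(
    ///     paths.extend_with(PathKind::Unknown),
    ///     PathsToNested::UNKNOWN | PathsToNested::COINDUCTIVE,
    /// );
    /// ```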
    #[must_use]
    fn extend_with(mut self, path: PathKind) -> Self {
        match path {
            PathKind::Inductive => {
                if self.intersects(PathsToNested::EMPTY) {
                    self.remove(PathsToNested::EMPTY);
                    self.insert(PathsToNested::INDUCTIVE);
                }
            }
            PathKind::Unknown => {
                if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) {
                    self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE);
                    self.insert(PathsToNested::UNKNOWN);
                }
            }
            PathKind::Coinductive => {
                if self.intersects(
                    PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                ) {
                    self.remove(
                        PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                    );
                    self.insert(PathsToNested::COINDUCTIVE);
                }
            }
            PathKind::ForcedAmbiguity => {
                if self.intersects(
                    PathsToNested::EMPTY
                        | PathsToNested::INDUCTIVE
                        | PathsToNested::UNKNOWN
                        | PathsToNested::COINDUCTIVE,
                ) {
                    self.remove(
                        PathsToNested::EMPTY
                            | PathsToNested::INDUCTIVE
                            | PathsToNested::UNKNOWN
                            | PathsToNested::COINDUCTIVE,
                    );
                    self.insert(PathsToNested::FORCED_AMBIGUITY);
                }
            }
        }

        self
    }

    #[must_use]
    fn extend_with_paths(self, path: PathsToNested) -> Self {
        let mut new = PathsToNested::empty();
        for p in path.iter_paths() {
            new |= self.extend_with(p);
        }
        new
    }

    fn iter_paths(self) -> impl Iterator<Item = PathKind> {
        // Exhaustively list all `PathKind` variants in an irrefutable pattern so
        // this fails to compile if a new variant is added without updating the
        // array below.
        let (PathKind::Inductive
            | PathKind::Unknown
            | PathKind::Coinductive
            | PathKind::ForcedAmbiguity);
        [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
            .into_iter()
            .filter(move |&p| self.contains(p.into()))
    }
}

/// The nested goals of each stack entry and the path from the
/// stack entry to that nested goal.
///
/// They are used when checking whether reevaluating a global cache entry
/// would encounter a cycle or use a provisional cache entry given the
/// current search graph state. We need to disable the global cache
/// in this case as it could otherwise result in behavioral differences.
/// Cycles can impact behavior. The cycle ABA may have different final
/// results from the cycle BAB depending on the cycle root.
///
/// We only start tracking nested goals once we've either encountered
/// overflow or a solver cycle. This is a performance optimization to
/// avoid tracking nested goals on the happy path.
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
    nested_goals: HashMap<X::Input, PathsToNested>,
}
impl<X: Cx> NestedGoals<X> {
    fn is_empty(&self) -> bool {
        self.nested_goals.is_empty()
    }

    fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) {
        match self.nested_goals.entry(input) {
            Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested,
            Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)),
        }
    }

    /// Adds the nested goals of a nested goal, given the path `step_kind` from
    /// this goal to that nested goal.
    ///
    /// If the path from this goal to the nested goal is inductive, the paths from this goal
    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
    /// the same as for the child.
    fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
        // Each nested goal is updated independently, and `insert` only unions paths for that
        // goal, so traversal order cannot affect the result.
        #[allow(rustc::potential_query_instability)]
        for (input, paths_to_nested) in nested_goals.iter() {
            let paths_to_nested = paths_to_nested.extend_with(step_kind);
            self.insert(input, paths_to_nested);
        }
    }

    // This helper intentionally exposes unstable hash iteration so each caller must opt in
    // locally and justify why its traversal is order-insensitive.
    #[cfg_attr(feature = "nightly", rustc_lint_query_instability)]
    #[allow(rustc::potential_query_instability)]
    fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ {
        self.nested_goals.iter().map(|(i, p)| (*i, *p))
    }

    fn contains(&self, input: X::Input) -> bool {
        self.nested_goals.contains_key(&input)
    }
}

/// A provisional result of an already computed goal which depends on other
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
struct ProvisionalCacheEntry<X: Cx> {
    /// Whether evaluating the goal encountered overflow. This is used to
    /// disable the cache entry except if the last goal on the stack is
    /// already involved in this cycle.
    encountered_overflow: bool,
    /// All cycle heads this cache entry depends on.
    heads: CycleHeads,
    /// The path from the highest cycle head to this goal. This differs from
    /// `heads` which tracks the path to the cycle head *from* this goal.
    path_from_head: PathKind,
    result: X::Result,
}

/// The final result of evaluating a goal.
///
/// We reset `encountered_overflow` when reevaluating a goal,
/// but need to track whether we've hit the recursion limit at
/// all for correctness.
///
/// We've previously simply returned the final `StackEntry` but this
/// made it easy to accidentally drop information from the previous
/// evaluation.
#[derive_where(Debug; X: Cx)]
struct EvaluationResult<X: Cx> {
    encountered_overflow: bool,
    required_depth: usize,
    heads: CycleHeads,
    nested_goals: NestedGoals<X>,
    result: X::Result,
}

impl<X: Cx> EvaluationResult<X> {
    fn finalize(
        final_entry: StackEntry<X>,
        encountered_overflow: bool,
        result: X::Result,
    ) -> EvaluationResult<X> {
        EvaluationResult {
            encountered_overflow,
            // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
            // and `nested_goals` between evaluations.
            required_depth: final_entry.required_depth,
            heads: final_entry.heads,
            nested_goals: final_entry.nested_goals,
            // We only care about the final result.
            result,
        }
    }
}

pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
    root_depth: AvailableDepth,
    stack: Stack<X>,
    /// The provisional cache contains entries for already computed goals which
    /// still depend on goals higher-up in the stack. We don't move them to the
    /// global cache but instead track them locally. A provisional cache entry
    /// is only valid until the result of one of its cycle heads changes.
    provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,

    _marker: PhantomData<D>,
}

/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
/// ordinary nested goals/global cache hits and provisional cache hits,
/// using the provisional cache should not add any nested goals.
///
/// `nested_goals` are only used when checking whether global cache entries
/// are applicable. This only cares about whether a goal is actually accessed.
/// Given that the usage of the provisional cache is fully deterministic, we
/// don't need to track the nested goals used while computing a provisional
/// cache entry.
enum UpdateParentGoalCtxt<'a, X: Cx> {
    Ordinary(&'a NestedGoals<X>),
    CycleOnStack(X::Input),
    ProvisionalCacheHit,
}

impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
    pub fn new(root_depth: usize) -> SearchGraph<D> {
        Self {
            root_depth: AvailableDepth(root_depth),
            stack: Default::default(),
            provisional_cache: Default::default(),
            _marker: PhantomData,
        }
    }

    /// Lazily update the stack entry for the parent goal.
    /// This behavior is shared between actually evaluating goals
    /// and using existing global cache entries to make sure they
    /// have the same impact on the remaining evaluation.
    fn update_parent_goal(
        stack: &mut Stack<X>,
        step_kind_from_parent: PathKind,
        required_depth_for_nested: usize,
        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
        encountered_overflow: bool,
        context: UpdateParentGoalCtxt<'_, X>,
    ) {
        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
            parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
            parent.encountered_overflow |= encountered_overflow;

            for (head_index, head) in heads {
                if let Some(candidate_usages) = &mut parent.candidate_usages {
                    candidate_usages
                        .usages
                        .get_or_insert_default()
                        .entry(head_index)
                        .or_default()
                        .add_usages_from_nested(head.usages);
                }
                match head_index.cmp(&parent_index) {
                    Ordering::Less => parent.heads.insert(
                        head_index,
                        head.paths_to_head.extend_with(step_kind_from_parent),
                        head.usages,
                    ),
                    Ordering::Equal => {
                        parent.usages.get_or_insert_default().add_usages_from_nested(head.usages);
                    }
                    Ordering::Greater => unreachable!(),
                }
            }
            let parent_depends_on_cycle = match context {
                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                    !nested_goals.is_empty()
                }
                UpdateParentGoalCtxt::CycleOnStack(head) => {
                    // We lookup provisional cache entries before detecting cycles.
                    // We therefore can't use a global cache entry if it contains a cycle
                    // whose head is in the provisional cache.
                    parent.nested_goals.insert(head, step_kind_from_parent.into());
                    true
                }
                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
            };
            // Once we've got goals which encountered overflow or a cycle,
            // we track all goals whose behavior may depend on these
            // goals as this change may cause them to now depend on additional
            // goals, resulting in new cycles. See the dev-guide for examples.
            if parent_depends_on_cycle {
                parent.nested_goals.insert(parent.input, PathsToNested::EMPTY);
            }
        }
    }
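
    // As a rough illustration of the bookkeeping above, assuming a nested goal
    // which reported `required_depth_for_nested = 3`: the parent must be able to
    // reevaluate that nested goal, so its own `required_depth` is raised to at
    // least `3 + 1`. Cycle heads with an index below the parent become heads of
    // the parent as well, while a head equal to the parent means the parent is
    // itself that cycle head, so only its `usages` are updated.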

    pub fn is_empty(&self) -> bool {
        if self.stack.is_empty() {
            debug_assert!(self.provisional_cache.is_empty());
            true
        } else {
            false
        }
    }

    /// The number of goals currently in the search graph. This should only be
    /// used for debugging purposes.
    pub fn debug_current_depth(&self) -> usize {
        self.stack.len()
    }

    /// Whether the path from `head` to the current stack entry is inductive or coinductive.
    ///
    /// The `step_kind_to_head` is used to add a single additional path segment to the path on
    /// the stack which completes the cycle. So given an inductive step AB which then cycles
    /// coinductively with A, we need to treat this cycle as coinductive.
    fn cycle_path_kind(
        stack: &Stack<X>,
        step_kind_to_head: PathKind,
        head: StackDepth,
    ) -> PathKind {
        stack.cycle_step_kinds(head).fold(step_kind_to_head, |curr, step| curr.extend(step))
    }
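
    // As an illustration of the fold above: starting from `step_kind_to_head`
    // and extending it with each step kind on the stack between `head` and the
    // current goal, a single coinductive step is enough for the combined path
    // to be treated as coinductive, matching the "AB cycles coinductively with
    // A" example from the doc comment.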

    pub fn enter_single_candidate(&mut self) {
        let prev = self.stack.last_mut().unwrap().candidate_usages.replace(Default::default());
        debug_assert!(prev.is_none(), "existing candidate_usages: {prev:?}");
    }

    pub fn finish_single_candidate(&mut self) -> CandidateHeadUsages {
        self.stack.last_mut().unwrap().candidate_usages.take().unwrap()
    }

    pub fn ignore_candidate_head_usages(&mut self, usages: CandidateHeadUsages) {
        if let Some(usages) = usages.usages {
            let (entry_index, entry) = self.stack.last_mut_with_index().unwrap();
            // Ignoring usages only mutates the state for the current `head_index`, so the
            // resulting per-head state is unchanged by iteration order.
            #[allow(rustc::potential_query_instability)]
            for (head_index, usages) in usages.into_iter() {
                if head_index == entry_index {
                    entry.usages.unwrap().ignore_usages(usages);
                } else {
                    entry.heads.ignore_usages(head_index, usages);
                }
            }
        }
    }

    pub fn evaluate_root_goal_for_proof_tree(
        cx: X,
        root_depth: usize,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let mut this = SearchGraph::<D>::new(root_depth);
        let available_depth = AvailableDepth(root_depth);
        let step_kind_from_parent = PathKind::Inductive; // is never used
        this.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });
        let evaluation_result = this.evaluate_goal_in_task(cx, input, inspect);
        evaluation_result.result
    }

    /// Probably the most involved method of the whole solver.
    ///
    /// While goals get computed via `D::compute_goal`, this function handles
    /// caching, overflow, and cycles.
    #[instrument(level = "debug", skip(self, cx, inspect), ret)]
    pub fn evaluate_goal(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let Some(available_depth) =
            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
        else {
            return self.handle_overflow(cx, input);
        };

        // We check the provisional cache before checking the global cache. This simplifies
        // the implementation as we can avoid worrying about cases where both the global and
        // provisional cache may apply, e.g. consider the following example
        //
        // - xxBA overflow
        // - A
        // - BA cycle
        // - CB :x:
        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
            return result;
        }

        // Lookup the global cache unless we're building proof trees or are currently
        // fuzzing.
        let validate_cache = if !D::inspect_is_noop(inspect) {
            None
        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
            // When validating the global cache we need to track the goals for which the
            // global cache has been disabled as it may otherwise change the result for
            // cyclic goals. We don't care about goals which are not on the current stack
            // so it's fine to drop their scope eagerly.
            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
                .inspect(|expected| debug!(?expected, "validate cache entry"))
                .map(|r| (scope, r))
        } else if let Some(result) =
            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
        {
            return result;
        } else {
            None
        };

        // Detect cycles on the stack. We do this after the global cache lookup to
        // avoid iterating over the stack in case a goal has already been computed.
        // This may not have an actual performance impact and we could reorder them
        // as it may reduce the number of `nested_goals` we need to track.
        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
            return result;
        }

        // Unfortunately, it looks like we actually have to compute this goal.
        self.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_cached_task` closure. If computing this goal depends on something
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycle participants.
        let (evaluation_result, dep_node) =
            cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect));

        // We've finished computing the goal and have popped it from the stack,
        // lazily update its parent goal.
        Self::update_parent_goal(
            &mut self.stack,
            step_kind_from_parent,
            evaluation_result.required_depth,
            evaluation_result.heads.iter(),
            evaluation_result.encountered_overflow,
            UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
        );
        let result = evaluation_result.result;

        // We're now done with this goal. We only add the root of cycles to the global cache.
        // In case this goal is involved in a larger cycle add it to the provisional cache.
        if evaluation_result.heads.is_empty() {
            if let Some((_scope, expected)) = validate_cache {
                // Do not try to move a goal into the cache again if we're testing
                // the global cache.
                assert_eq!(expected, evaluation_result.result, "input={input:?}");
            } else if D::inspect_is_noop(inspect) {
                self.insert_global_cache(cx, input, evaluation_result, dep_node)
            }
        } else if D::ENABLE_PROVISIONAL_CACHE {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
            let entry = self.provisional_cache.entry(input).or_default();
            let EvaluationResult {
                encountered_overflow,
                required_depth: _,
                heads,
                nested_goals: _,
                result,
            } = evaluation_result;
            let path_from_head = Self::cycle_path_kind(
                &self.stack,
                step_kind_from_parent,
                heads.highest_cycle_head_index(),
            );
            let provisional_cache_entry =
                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
            debug!(?provisional_cache_entry);
            entry.push(provisional_cache_entry);
        } else {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
        }

        result
    }
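
    // To summarize the order of operations in `evaluate_goal` above: check for
    // overflow, then the provisional cache, then the global cache (skipped when
    // building proof trees, and only used for validation when fuzzing), then
    // cycles on the stack. Only if all of these miss do we push a `StackEntry`
    // and actually compute the goal via `evaluate_goal_in_task`.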

    fn handle_overflow(&mut self, cx: X, input: X::Input) -> X::Result {
        if let Some(last) = self.stack.last_mut() {
            last.encountered_overflow = true;
            // If computing a goal `B` depends on another goal `A` and
            // `A` has a nested goal which overflows, then computing `B`
            // at the same depth, but with `A` already on the stack,
            // would encounter a solver cycle instead, potentially
            // changing the result.
            //
            // We must therefore not use the global cache entry for `B` in that case.
            // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
            last.nested_goals.insert(last.input, PathsToNested::EMPTY);
        }

        debug!("encountered stack overflow");
        D::stack_overflow_result(cx, input)
    }

    /// When reevaluating a goal with a changed provisional result, all provisional cache
    /// entries which depend on this goal get invalidated.
    ///
    /// Note that we keep provisional cache entries which accessed this goal as a cycle head, but
    /// don't depend on its value.
    fn clear_dependent_provisional_results_for_rerun(&mut self) {
        let rerun_index = self.stack.next_index();
        // Each cached entry is filtered independently based on whether it depends on
        // `rerun_index`, so bucket traversal order does not matter.
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|_, entries| {
            entries.retain(|entry| {
                let (head_index, head) = entry.heads.highest_cycle_head();
                head_index != rerun_index || head.usages.is_empty()
            });
            !entries.is_empty()
        });
    }
}

/// We need to rebase provisional cache entries when popping one of their cycle
/// heads from the stack. This may not necessarily mean that we've actually
/// reached a fixpoint for that cycle head, which impacts the way we rebase
/// provisional cache entries.
#[derive_where(Debug; X: Cx)]
enum RebaseReason<X: Cx> {
    NoCycleUsages,
    Ambiguity(X::AmbiguityInfo),
    Overflow,
    /// We've actually reached a fixpoint.
    ///
    /// This can happen in the first evaluation step for the cycle head. In this
    /// case the used provisional result depends on the cycle `PathKind`. We store
    /// this path kind to check whether the provisional cache entry we're rebasing
    /// relied on the same cycles.
    ///
    /// In later iterations cycles always return `stack_entry.provisional_result`,
    /// so we no longer depend on the `PathKind`. We store `None` in that case.
    ReachedFixpoint(Option<PathKind>),
}

impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
    /// A necessary optimization to handle complex solver cycles. A provisional cache entry
    /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
    /// head from the stack after we've finished computing it, we can't be sure that the
    /// provisional cache entry is still applicable. We need to keep the cache entries to
    /// prevent hangs.
    ///
    /// This can be thought of as pretending to reevaluate the popped head as nested goals
    /// of this provisional result. For this to be correct, all cycles encountered while
    /// we'd reevaluate the cycle head as a nested goal must keep the same cycle kind. See
    /// the [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
    ///
    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
    /// its provisional result is invalid. Actually discarding provisional cache entries in
    /// this case would cause hangs, so we instead change the result of dependent provisional
    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
    /// to me.
    #[instrument(level = "trace", skip(self, cx))]
    fn rebase_provisional_cache_entries(
        &mut self,
        cx: X,
        stack_entry: &StackEntry<X>,
        rebase_reason: RebaseReason<X>,
    ) {
        let popped_head_index = self.stack.next_index();
        // Rebasing decisions depend only on each provisional entry and the current stack state,
        // so traversing the cache in hash order cannot change the final cache contents.
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|&input, entries| {
            entries.retain_mut(|entry| {
                let ProvisionalCacheEntry {
                    encountered_overflow: _,
                    heads,
                    path_from_head,
                    result,
                } = entry;
                let popped_head = if heads.highest_cycle_head_index() == popped_head_index {
                    heads.remove_highest_cycle_head()
                } else {
                    debug_assert!(heads.highest_cycle_head_index() < popped_head_index);
                    return true;
                };

                // We're rebasing an entry `e` over a head `p`. This head
                // has a number of own heads `h` it depends on.
                //
                // This causes our provisional result to depend on the heads
                // of `p` to avoid moving any goal which uses this cache entry to
                // the global cache.
                if popped_head.usages.is_empty() {
                    // The result of `e` does not depend on the value of `p`. Thus we can
                    // keep using the result of this provisional cache entry even if evaluating
                    // `p` as a nested goal of `e` would have a different result.
                    for (head_index, _) in stack_entry.heads.iter() {
                        heads.insert(head_index, PathsToNested::EMPTY, HeadUsages::default());
                    }
                } else {
                    // The entry `e` actually depends on the value of `p`. We need
                    // to make sure that the value of `p` wouldn't change even if we
                    // were to reevaluate it as a nested goal of `e` instead. For this
                    // we check that the path kind of all paths `hph` remains the
                    // same after rebasing.
                    //
                    // After rebasing the cycles `hph` will go through `e`. We need to make
                    // sure that for all possible paths `hep`, `heph` is equal to `hph`.
                    let ep = popped_head.paths_to_head;
                    for (head_index, head) in stack_entry.heads.iter() {
                        let ph = head.paths_to_head;
                        let hp = Self::cycle_path_kind(
                            &self.stack,
                            stack_entry.step_kind_from_parent,
                            head_index,
                        );
                        // We first validate that all cycles while computing `p` would stay
                        // the same if we were to recompute it as a nested goal of `e`.
                        let he = hp.extend(*path_from_head);
                        for ph in ph.iter_paths() {
                            let hph = hp.extend(ph);
                            for ep in ep.iter_paths() {
                                let hep = ep.extend(he);
                                let heph = hep.extend(ph);
                                if hph != heph {
                                    return false;
                                }
                            }
                        }

                        // If so, all paths reached while computing `p` have to get added
                        // to the heads of `e` to make sure that rebasing `e` again also
                        // considers them.
                        let eph = ep.extend_with_paths(ph);
                        heads.insert(head_index, eph, head.usages);
                    }

                    // The provisional cache entry does depend on the provisional result
                    // of the popped cycle head. We need to mutate the result of our
                    // provisional cache entry in case we did not reach a fixpoint.
                    match rebase_reason {
                        // If the cycle head does not actually depend on itself, then
                        // the provisional result used by the provisional cache entry
                        // is not actually equal to the final provisional result. We
                        // need to discard the provisional cache entry in this case.
                        RebaseReason::NoCycleUsages => return false,
                        RebaseReason::Ambiguity(info) => {
                            *result = D::propagate_ambiguity(cx, input, info);
                        }
                        RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
                        RebaseReason::ReachedFixpoint(None) => {}
                        RebaseReason::ReachedFixpoint(Some(path_kind)) => {
                            if !popped_head.usages.is_single(path_kind) {
                                return false;
                            }
                        }
                    };
                }

                let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
                    return false;
                };

                // We now care about the path from the next highest cycle head to the
                // provisional cache entry.
                *path_from_head = path_from_head.extend(Self::cycle_path_kind(
                    &self.stack,
                    stack_entry.step_kind_from_parent,
                    new_highest_head_index,
                ));

                trace!(?input, ?entry, "rebased provisional cache entry");

                true
            });
            !entries.is_empty()
        });
    }
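
    // A short legend for the single-letter path names used above: `e` is the
    // cache entry being rebased, `p` the popped cycle head, and `h` one of the
    // remaining heads of `p`. `ep` are the paths from `e` to `p`, `ph` the paths
    // from `p` back up to `h`, `hp` the path from `h` down to `p` on the stack,
    // and `he` the path from `h` through `p` down to `e`. Rebasing keeps the
    // entry only if replacing the original cycle `hph` with the rerouted cycle
    // `heph` results in the same path kind for every combination of paths.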

    fn lookup_provisional_cache(
        &mut self,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        if !D::ENABLE_PROVISIONAL_CACHE {
            return None;
        }

        let entries = self.provisional_cache.get(&input)?;
        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
            entries
        {
            let head_index = heads.highest_cycle_head_index();
            if encountered_overflow {
                // This check is overly strict and very subtle. We need to make sure that if
                // a global cache entry depends on some goal without adding it to its
                // `nested_goals`, that goal must never have an applicable provisional
                // cache entry to avoid incorrectly applying the cache entry.
                //
                // As we'd have to otherwise track literally all nested goals, we only
                // apply provisional cache entries which encountered overflow once the
                // current goal is already part of the same cycle. This check could be
                // improved but seems to be good enough for now.
                let last = self.stack.last().unwrap();
                if last.heads.opt_lowest_cycle_head_index().is_none_or(|lowest| lowest > head_index)
                {
                    continue;
                }
            }

            // A provisional cache entry is only valid if the current path from its
            // highest cycle head to the goal is the same.
            if path_from_head
                == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index)
            {
                Self::update_parent_goal(
                    &mut self.stack,
                    step_kind_from_parent,
                    0,
                    heads.iter(),
                    encountered_overflow,
                    UpdateParentGoalCtxt::ProvisionalCacheHit,
                );
                debug!(?head_index, ?path_from_head, "provisional cache hit");
                return Some(result);
            }
        }

        None
    }

    /// Even if there is a global cache entry for a given goal, we need to make sure
    /// evaluating this entry would not have ended up depending on either a goal
    /// already on the stack or a provisional cache entry.
    fn candidate_is_applicable(
        &self,
        step_kind_from_parent: PathKind,
        nested_goals: &NestedGoals<X>,
    ) -> bool {
        // If the global cache entry didn't depend on any nested goals, it always
        // applies.
        if nested_goals.is_empty() {
            return true;
        }

        // If a nested goal of the global cache entry is on the stack, we would
        // definitely encounter a cycle.
        if self.stack.iter().any(|e| nested_goals.contains(e.input)) {
            debug!("cache entry not applicable due to stack");
            return false;
        }

        // The global cache entry is also invalid if there's a provisional cache entry
        // which would apply for any of its nested goals.
        // Any matching provisional entry rejects the candidate,
        // so iteration order only affects when we return `false`, not the final answer.
        #[allow(rustc::potential_query_instability)]
        for (input, path_from_global_entry) in nested_goals.iter() {
            let Some(entries) = self.provisional_cache.get(&input) else {
                continue;
            };

            debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable");
            // A provisional cache entry is applicable if the path to
            // its highest cycle head is equal to the expected path.
            for &ProvisionalCacheEntry {
                encountered_overflow,
                ref heads,
                path_from_head: head_to_provisional,
                result: _,
            } in entries.iter()
            {
                // We don't have to worry about provisional cache entries which encountered
                // overflow, see the relevant comment in `lookup_provisional_cache`.
                if encountered_overflow {
                    continue;
                }

                // A provisional cache entry only applies if the path from its highest head
                // matches the path when encountering the goal.
                //
                // We check if any of the paths taken while computing the global goal
                // would end up with an applicable provisional cache entry.
                let head_index = heads.highest_cycle_head_index();
                let head_to_curr =
                    Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
                let full_paths = path_from_global_entry.extend_with(head_to_curr);
                if full_paths.contains(head_to_provisional.into()) {
                    debug!(
                        ?full_paths,
                        ?head_to_provisional,
                        "cache entry not applicable due to matching paths"
                    );
                    return false;
                }
            }
        }

        true
    }
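
    // In other words, a global cache candidate is rejected above in exactly two
    // situations: one of its nested goals is currently on the stack, so using it
    // would hide a cycle, or a provisional cache entry would have applied to one
    // of its nested goals along a matching path, in which case reevaluating the
    // goal now could produce a different result.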

    /// Used when fuzzing the global cache. Accesses the global cache without
    /// updating the state of the search graph.
    fn lookup_global_cache_untracked(
        &self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            cache
                .get(cx, input, available_depth, |nested_goals| {
                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
                })
                .map(|c| c.result)
        })
    }

    /// Try to fetch a previously computed result from the global cache,
    /// making sure to only do so if it would match the result of reevaluating
    /// this goal.
    fn lookup_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache
                .get(cx, input, available_depth, |nested_goals| {
                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
                })?;

            // We don't move cycle participants to the global cache, so the
            // cycle heads are always empty.
            let heads = iter::empty();
            Self::update_parent_goal(
                &mut self.stack,
                step_kind_from_parent,
                required_depth,
                heads,
                encountered_overflow,
                UpdateParentGoalCtxt::Ordinary(nested_goals),
            );

            debug!(?required_depth, "global cache hit");
            Some(result)
        })
    }

    fn check_cycle_on_stack(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        let head_index = self.stack.find(input)?;
        // We have a nested goal which directly relies on a goal deeper in the stack.
        //
        // We start by tagging all cycle participants, as that's necessary for caching.
        //
        // Finally we can return either the provisional response or the initial response
        // in case we're in the first fixpoint iteration for this goal.
        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
        let mut usages = HeadUsages::default();
        usages.add_usage(path_kind);
        let head = CycleHead { paths_to_head: step_kind_from_parent.into(), usages };
        Self::update_parent_goal(
            &mut self.stack,
            step_kind_from_parent,
            0,
            iter::once((head_index, head)),
            false,
            UpdateParentGoalCtxt::CycleOnStack(input),
        );

        // Return the provisional result or, if we're in the first iteration,
        // start with no constraints.
        if let Some(result) = self.stack[head_index].provisional_result {
            Some(result)
        } else {
            Some(D::initial_provisional_result(cx, path_kind, input))
        }
    }

    /// Whether we've reached a fixpoint when evaluating a cycle head.
    #[instrument(level = "trace", skip(self, stack_entry), ret)]
    fn reached_fixpoint(
        &mut self,
        stack_entry: &StackEntry<X>,
        usages: HeadUsages,
        result: X::Result,
    ) -> Result<Option<PathKind>, ()> {
        let provisional_result = stack_entry.provisional_result;
        if let Some(provisional_result) = provisional_result {
            if provisional_result == result { Ok(None) } else { Err(()) }
        } else if let Some(path_kind) = D::is_initial_provisional_result(result)
            .filter(|&path_kind| usages.is_single(path_kind))
        {
            Ok(Some(path_kind))
        } else {
            Err(())
        }
    }

    /// When we encounter a coinductive cycle, we have to fetch the
    /// result of that cycle while we are still computing it. Because
    /// of this we continuously recompute the cycle until the result
    /// of the previous iteration is equal to the final result, at which
    /// point we are done.
    fn evaluate_goal_in_task(
        &mut self,
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> EvaluationResult<X> {
        // We reset `encountered_overflow` each time we rerun this goal
        // but need to make sure we correctly propagate it to the global
        // cache even if only some of the evaluations actually reach the
        // recursion limit.
        let mut encountered_overflow = false;
        let mut i = 0;
        loop {
            let result = D::compute_goal(self, cx, input, inspect);
            let stack_entry = self.stack.pop();
            encountered_overflow |= stack_entry.encountered_overflow;
            debug_assert_eq!(stack_entry.input, input);

            // If the current goal is not a cycle head, we are done.
            //
            // There are no provisional cache entries which depend on this goal.
            let Some(usages) = stack_entry.usages else {
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If it is a cycle head, we have to keep trying to prove it until
            // we reach a fixpoint. We need to do so for all cycle heads,
            // not only for the root.
            //
            // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
            // for an example.
            //
            // Check whether we reached a fixpoint, either because the final result
            // is equal to the provisional result of the previous iteration, or because
            // this was only the head of either coinductive or inductive cycles, and the
            // final result is equal to the initial response for that case.
            if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::ReachedFixpoint(fixpoint),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            } else if usages.is_empty() {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::NoCycleUsages,
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // If computing this goal results in ambiguity with no constraints,
            // we do not rerun it. It's incredibly difficult to get a different
            // response in the next iteration in this case. These changes would
            // likely either be caused by incompleteness or can change the maybe
            // cause from ambiguity to overflow. Returning ambiguity always
            // preserves soundness and completeness even if the goal is known
            // to succeed or fail.
            //
            // This prevents exponential blowup affecting multiple major crates.
            // As we only get to this branch if we haven't yet reached a fixpoint,
            // we also taint all provisional cache entries which depend on the
            // current goal.
            if let Some(info) = D::is_ambiguous_result(result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::Ambiguity(info),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If we've reached the fixpoint step limit, we bail with overflow and taint all
            // provisional cache entries which depend on the current goal.
            i += 1;
            if i >= D::FIXPOINT_STEP_LIMIT {
                debug!("canonical cycle overflow");
                let result = D::fixpoint_overflow_result(cx, input);
                self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // Clear all provisional cache entries which depend on a previous provisional
            // result of this goal and rerun. This does not remove goals which accessed this
            // goal without depending on its result.
            self.clear_dependent_provisional_results_for_rerun();

            debug!(?result, "fixpoint changed provisional results");
            self.stack.push(StackEntry {
                input,
                step_kind_from_parent: stack_entry.step_kind_from_parent,
                available_depth: stack_entry.available_depth,
                provisional_result: Some(result),
                // We can keep these goals from previous iterations as they are only
                // ever read after finalizing this evaluation.
                required_depth: stack_entry.required_depth,
                heads: stack_entry.heads,
                nested_goals: stack_entry.nested_goals,
                // We reset these two fields when rerunning this goal. We could
                // keep `encountered_overflow` as it's only used as a performance
                // optimization. However, given that the proof tree will likely look
                // similar to the previous iterations when reevaluating, it's better
                // for caching if the reevaluation also starts out with `false`.
                encountered_overflow: false,
                // We keep provisional cache entries around if they used this goal
                // without depending on its result.
                //
                // We still need to drop or rebase these cache entries once we've
                // finished evaluating this goal.
                usages: Some(HeadUsages::default()),
                candidate_usages: None,
            });
        }
    }
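
    // A minimal sketch of the loop above for a hypothetical self-cycle `A -> A`:
    // the first iteration evaluates `A` against the initial provisional result
    // for the cycle's path kind. If the computed result differs and is neither
    // ambiguous nor over the step limit, it becomes the `provisional_result` of
    // the next iteration and `A` is evaluated again. The loop ends once the
    // result is stable (`reached_fixpoint`), the head has no cycle usages, the
    // result is ambiguous, or `FIXPOINT_STEP_LIMIT` forces an overflow result.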

    /// When encountering a cycle, both inductive and coinductive, we only
    /// move the root into the global cache. We also store all other cycle
    /// participants involved.
    ///
    /// We must not use the global cache entry of a root goal if a cycle
    /// participant is on the stack. This is necessary to prevent unstable
    /// results. See the comment of `StackEntry::nested_goals` for
    /// more details.
    fn insert_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        evaluation_result: EvaluationResult<X>,
        dep_node: X::DepNodeIndex,
    ) {
        debug!(?evaluation_result, "insert global cache");
        cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
    }
}