//! The search graph is responsible for caching and cycle detection in the trait
//! solver. Making sure that caching doesn't result in soundness bugs or unstable
//! query results is very challenging and makes this one of the most-involved
//! self-contained components of the compiler.
//!
//! We added fuzzing support to test its correctness. The fuzzers used to verify
//! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
//!
//! This is just a quick overview of the general design; please check out the relevant
//! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
//! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
//! The global cache has to be completely unobservable, while the per-cycle cache may impact
//! behavior as long as the resulting behavior is still correct.
use std::cmp::Ordering;
use std::collections::hash_map::Entry;
use std::collections::{BTreeMap, btree_map};
use std::fmt::Debug;
use std::hash::Hash;
use std::iter;
use std::marker::PhantomData;

use derive_where::derive_where;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
use rustc_type_ir::data_structures::HashMap;
use tracing::{debug, instrument, trace};

mod stack;
use stack::{Stack, StackDepth, StackEntry};
mod global_cache;
use global_cache::CacheData;
pub use global_cache::GlobalCache;

/// The search graph does not use `Interner` directly. This makes it possible
/// to fuzz the search graph without having to stub the rest of the interner.
/// We don't make this a super trait of `Interner` as users of the shared type
/// library shouldn't have to care about `Input` and `Result`, since they are
/// implementation details of the search graph.
pub trait Cx: Copy {
    type Input: Debug + Eq + Hash + Copy;
    type Result: Debug + Eq + Hash + Copy;
    type AmbiguityInfo: Debug + Eq + Hash + Copy;

    type DepNodeIndex;
    type Tracked<T: Debug + Clone>: Debug;
    fn mk_tracked<T: Debug + Clone>(
        self,
        data: T,
        dep_node_index: Self::DepNodeIndex,
    ) -> Self::Tracked<T>;
    fn get_tracked<T: Debug + Clone>(self, tracked: &Self::Tracked<T>) -> T;
    fn with_cached_task<T>(self, task: impl FnOnce() -> T) -> (T, Self::DepNodeIndex);

    fn with_global_cache<R>(self, f: impl FnOnce(&mut GlobalCache<Self>) -> R) -> R;

    fn assert_evaluation_is_concurrent(&self);
}

pub trait Delegate: Sized {
    type Cx: Cx;
    /// Whether to use the provisional cache. Set to `false` by a fuzzer when
    /// validating the search graph.
    const ENABLE_PROVISIONAL_CACHE: bool;
    type ValidationScope;
    /// Returning `Some` disables the global cache for the current goal.
    ///
    /// The `ValidationScope` is used when fuzzing the search graph to track
    /// for which goals the global cache has been disabled. This is necessary
    /// as we may otherwise ignore the global cache entry for some goal `G`
    /// only to later use it, failing to detect a cycle goal and potentially
    /// changing the result.
    fn enter_validation_scope(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> Option<Self::ValidationScope>;

    const FIXPOINT_STEP_LIMIT: usize;

    type ProofTreeBuilder;
    fn inspect_is_noop(inspect: &mut Self::ProofTreeBuilder) -> bool;

    const DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW: usize;

    fn initial_provisional_result(
        cx: Self::Cx,
        kind: PathKind,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn is_initial_provisional_result(result: <Self::Cx as Cx>::Result) -> Option<PathKind>;
    fn stack_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;
    fn fixpoint_overflow_result(
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
    ) -> <Self::Cx as Cx>::Result;

    fn is_ambiguous_result(
        result: <Self::Cx as Cx>::Result,
    ) -> Option<<Self::Cx as Cx>::AmbiguityInfo>;
    fn propagate_ambiguity(
        cx: Self::Cx,
        for_input: <Self::Cx as Cx>::Input,
        ambiguity_info: <Self::Cx as Cx>::AmbiguityInfo,
    ) -> <Self::Cx as Cx>::Result;

    fn compute_goal(
        search_graph: &mut SearchGraph<Self>,
        cx: Self::Cx,
        input: <Self::Cx as Cx>::Input,
        inspect: &mut Self::ProofTreeBuilder,
    ) -> <Self::Cx as Cx>::Result;
}

/// In the initial iteration of a cycle, we do not yet have a provisional
/// result. In this case we return an initial provisional result depending
/// on the kind of cycle.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Decodable_NoContext, Encodable_NoContext, HashStable_NoContext)
)]
pub enum PathKind {
    /// A path consisting of only inductive/unproductive steps. Their initial
    /// provisional result is `Err(NoSolution)`. We currently treat them as
    /// `PathKind::Unknown` during coherence until we're fully confident in
    /// our approach.
    Inductive,
    /// A path which is not coinductive right now, but which we may want to
    /// make coinductive in the future. We return an ambiguous result in this
    /// case to prevent people from relying on this.
    Unknown,
    /// A path with at least one coinductive step. Such cycles hold.
    Coinductive,
    /// A path which is treated as ambiguous. Once a path has this path kind,
    /// any other segment does not change its kind.
    ///
    /// This is currently only used when fuzzing to support negative reasoning.
    /// For more details, see #143054.
    ForcedAmbiguity,
}

impl PathKind {
    /// Returns the path kind when merging `self` with `rest`.
    ///
    /// Given an inductive path `self` and a coinductive path `rest`,
    /// the path `self -> rest` would be coinductive.
    ///
    /// This operation represents an ordering and would be equivalent
    /// to `max(self, rest)`.
    fn extend(self, rest: PathKind) -> PathKind {
        match (self, rest) {
            (PathKind::ForcedAmbiguity, _) | (_, PathKind::ForcedAmbiguity) => {
                PathKind::ForcedAmbiguity
            }
            (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
            (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
            (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
        }
    }
}
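
// Illustrative example, not part of the original source: `extend` behaves like
// `max` over the ordering `Inductive < Unknown < Coinductive < ForcedAmbiguity`
// and is therefore commutative.
#[cfg(test)]
mod path_kind_extend_example {
    use super::PathKind;

    #[test]
    fn extend_is_commutative_max() {
        assert_eq!(PathKind::Inductive.extend(PathKind::Coinductive), PathKind::Coinductive);
        assert_eq!(PathKind::Coinductive.extend(PathKind::Inductive), PathKind::Coinductive);
        assert_eq!(PathKind::Unknown.extend(PathKind::Inductive), PathKind::Unknown);
        assert_eq!(PathKind::ForcedAmbiguity.extend(PathKind::Unknown), PathKind::ForcedAmbiguity);
    }
}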

/// The kinds of cycles a cycle head was involved in.
///
/// This is used to avoid rerunning a cycle if there's
/// just a single usage kind and the final result matches
/// its provisional result.
///
/// While it tracks the number of usages using `u32`, we only ever
/// care whether there are any. We only count them to be able to ignore
/// usages from irrelevant candidates while evaluating a goal.
///
/// This cares about how nested goals relied on a cycle head. It does
/// not care about how frequently the nested goal relied on it.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
struct HeadUsages {
    inductive: u32,
    unknown: u32,
    coinductive: u32,
    forced_ambiguity: u32,
}

impl HeadUsages {
    fn add_usage(&mut self, path: PathKind) {
        match path {
            PathKind::Inductive => self.inductive += 1,
            PathKind::Unknown => self.unknown += 1,
            PathKind::Coinductive => self.coinductive += 1,
            PathKind::ForcedAmbiguity => self.forced_ambiguity += 1,
        }
    }

    /// This adds the usages which occurred while computing a nested goal.
    ///
    /// We don't actually care about how frequently the nested goal relied
    /// on its cycle heads, only whether it did.
    fn add_usages_from_nested(&mut self, usages: HeadUsages) {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
        self.inductive += if inductive == 0 { 0 } else { 1 };
        self.unknown += if unknown == 0 { 0 } else { 1 };
        self.coinductive += if coinductive == 0 { 0 } else { 1 };
        self.forced_ambiguity += if forced_ambiguity == 0 { 0 } else { 1 };
    }

    fn ignore_usages(&mut self, usages: HeadUsages) {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = usages;
        self.inductive = self.inductive.checked_sub(inductive).unwrap();
        self.unknown = self.unknown.checked_sub(unknown).unwrap();
        self.coinductive = self.coinductive.checked_sub(coinductive).unwrap();
        self.forced_ambiguity = self.forced_ambiguity.checked_sub(forced_ambiguity).unwrap();
    }

    fn is_empty(self) -> bool {
        let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = self;
        inductive == 0 && unknown == 0 && coinductive == 0 && forced_ambiguity == 0
    }

    fn is_single(self, path_kind: PathKind) -> bool {
        match path_kind {
            PathKind::Inductive => matches!(
                self,
                HeadUsages { inductive: _, unknown: 0, coinductive: 0, forced_ambiguity: 0 },
            ),
            PathKind::Unknown => matches!(
                self,
                HeadUsages { inductive: 0, unknown: _, coinductive: 0, forced_ambiguity: 0 },
            ),
            PathKind::Coinductive => matches!(
                self,
                HeadUsages { inductive: 0, unknown: 0, coinductive: _, forced_ambiguity: 0 },
            ),
            PathKind::ForcedAmbiguity => matches!(
                self,
                HeadUsages { inductive: 0, unknown: 0, coinductive: 0, forced_ambiguity: _ },
            ),
        }
    }
}
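
// Illustrative example, not part of the original source: usages bubbled up from
// a nested goal only contribute whether a path kind occurred at all, not how often.
#[cfg(test)]
mod head_usages_example {
    use super::{HeadUsages, PathKind};

    #[test]
    fn nested_usages_count_at_most_once() {
        let mut nested = HeadUsages::default();
        nested.add_usage(PathKind::Coinductive);
        nested.add_usage(PathKind::Coinductive);

        let mut parent = HeadUsages::default();
        parent.add_usages_from_nested(nested);
        assert_eq!(parent.coinductive, 1);
        assert!(parent.is_single(PathKind::Coinductive));
        assert!(!parent.is_empty());
    }
}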

#[derive(Debug, Default)]
pub struct CandidateHeadUsages {
    usages: Option<Box<HashMap<StackDepth, HeadUsages>>>,
}
impl CandidateHeadUsages {
    pub fn merge_usages(&mut self, other: CandidateHeadUsages) {
        if let Some(other_usages) = other.usages {
            if let Some(ref mut self_usages) = self.usages {
                // Each head is merged independently, so the final usage counts are the same
                // regardless of hash iteration order.
                #[allow(rustc::potential_query_instability)]
                for (head_index, head) in other_usages.into_iter() {
                    let HeadUsages { inductive, unknown, coinductive, forced_ambiguity } = head;
                    let self_usages = self_usages.entry(head_index).or_default();
                    self_usages.inductive += inductive;
                    self_usages.unknown += unknown;
                    self_usages.coinductive += coinductive;
                    self_usages.forced_ambiguity += forced_ambiguity;
                }
            } else {
                self.usages = Some(other_usages);
            }
        }
    }
}

#[derive(Debug, Clone, Copy)]
struct AvailableDepth(usize);
impl AvailableDepth {
    /// Returns the remaining depth allowed for nested goals.
    ///
    /// This is generally simply one less than the current depth.
    /// However, if we encountered overflow, we significantly reduce
    /// the remaining depth of all nested goals to prevent hangs
    /// in case there is exponential blowup.
    fn allowed_depth_for_nested<D: Delegate>(
        root_depth: AvailableDepth,
        stack: &Stack<D::Cx>,
    ) -> Option<AvailableDepth> {
        if let Some(last) = stack.last() {
            if last.available_depth.0 == 0 {
                return None;
            }

            Some(if last.encountered_overflow {
                AvailableDepth(last.available_depth.0 / D::DIVIDE_AVAILABLE_DEPTH_ON_OVERFLOW)
            } else {
                AvailableDepth(last.available_depth.0 - 1)
            })
        } else {
            Some(root_depth)
        }
    }

    /// Whether we're allowed to use a global cache entry which required
    /// the given depth.
    fn cache_entry_is_applicable(self, additional_depth: usize) -> bool {
        self.0 >= additional_depth
    }
}
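
// Illustrative example, not part of the original source: a global cache entry is
// applicable only if the depth it required does not exceed the currently available depth.
#[cfg(test)]
mod available_depth_example {
    use super::AvailableDepth;

    #[test]
    fn cache_entry_applicability() {
        assert!(AvailableDepth(8).cache_entry_is_applicable(8));
        assert!(!AvailableDepth(4).cache_entry_is_applicable(5));
    }
}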

#[derive(Clone, Copy, Debug)]
struct CycleHead {
    paths_to_head: PathsToNested,
    /// If the `usages` are empty, the result of that head does not matter
    /// for the current goal. However, we still don't completely drop this
    /// cycle head as whether or not it exists impacts which queries we
    /// access, so ignoring it would cause incremental compilation verification
    /// failures or hide query cycles.
    usages: HeadUsages,
}

/// All cycle heads a given goal depends on, ordered by their stack depth.
///
/// We also track all paths from this goal to that head. This is necessary
/// when rebasing provisional cache results.
#[derive(Clone, Debug, Default)]
struct CycleHeads {
    heads: BTreeMap<StackDepth, CycleHead>,
}

impl CycleHeads {
    fn is_empty(&self) -> bool {
        self.heads.is_empty()
    }

    fn highest_cycle_head(&self) -> (StackDepth, CycleHead) {
        self.heads.last_key_value().map(|(k, v)| (*k, *v)).unwrap()
    }

    fn highest_cycle_head_index(&self) -> StackDepth {
        self.opt_highest_cycle_head_index().unwrap()
    }

    fn opt_highest_cycle_head_index(&self) -> Option<StackDepth> {
        self.heads.last_key_value().map(|(k, _)| *k)
    }

    fn opt_lowest_cycle_head_index(&self) -> Option<StackDepth> {
        self.heads.first_key_value().map(|(k, _)| *k)
    }

    fn remove_highest_cycle_head(&mut self) -> CycleHead {
        let last = self.heads.pop_last();
        last.unwrap().1
    }

    fn insert(
        &mut self,
        head_index: StackDepth,
        path_from_entry: impl Into<PathsToNested> + Copy,
        usages: HeadUsages,
    ) {
        match self.heads.entry(head_index) {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(CycleHead { paths_to_head: path_from_entry.into(), usages });
            }
            btree_map::Entry::Occupied(entry) => {
                let head = entry.into_mut();
                head.paths_to_head |= path_from_entry.into();
                head.usages.add_usages_from_nested(usages);
            }
        }
    }

    fn ignore_usages(&mut self, head_index: StackDepth, usages: HeadUsages) {
        self.heads.get_mut(&head_index).unwrap().usages.ignore_usages(usages)
    }

    fn iter(&self) -> impl Iterator<Item = (StackDepth, CycleHead)> + '_ {
        self.heads.iter().map(|(k, v)| (*k, *v))
    }
}

bitflags::bitflags! {
    /// Tracks how nested goals have been accessed. This is necessary to disable
    /// global cache entries if computing them would otherwise result in a cycle or
    /// access a provisional cache entry.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct PathsToNested: u8 {
        /// The initial value when adding a goal to its own nested goals.
        const EMPTY = 1 << 0;
        const INDUCTIVE = 1 << 1;
        const UNKNOWN = 1 << 2;
        const COINDUCTIVE = 1 << 3;
        const FORCED_AMBIGUITY = 1 << 4;
    }
}
impl From<PathKind> for PathsToNested {
    fn from(path: PathKind) -> PathsToNested {
        match path {
            PathKind::Inductive => PathsToNested::INDUCTIVE,
            PathKind::Unknown => PathsToNested::UNKNOWN,
            PathKind::Coinductive => PathsToNested::COINDUCTIVE,
            PathKind::ForcedAmbiguity => PathsToNested::FORCED_AMBIGUITY,
        }
    }
}
impl PathsToNested {
    /// The implementation of this function is kind of ugly. We check whether
    /// there currently exist 'weaker' paths in the set, if so we upgrade these
    /// paths to at least `path`.
    #[must_use]
    fn extend_with(mut self, path: PathKind) -> Self {
        match path {
            PathKind::Inductive => {
                if self.intersects(PathsToNested::EMPTY) {
                    self.remove(PathsToNested::EMPTY);
                    self.insert(PathsToNested::INDUCTIVE);
                }
            }
            PathKind::Unknown => {
                if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) {
                    self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE);
                    self.insert(PathsToNested::UNKNOWN);
                }
            }
            PathKind::Coinductive => {
                if self.intersects(
                    PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                ) {
                    self.remove(
                        PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN,
                    );
                    self.insert(PathsToNested::COINDUCTIVE);
                }
            }
            PathKind::ForcedAmbiguity => {
                if self.intersects(
                    PathsToNested::EMPTY
                        | PathsToNested::INDUCTIVE
                        | PathsToNested::UNKNOWN
                        | PathsToNested::COINDUCTIVE,
                ) {
                    self.remove(
                        PathsToNested::EMPTY
                            | PathsToNested::INDUCTIVE
                            | PathsToNested::UNKNOWN
                            | PathsToNested::COINDUCTIVE,
                    );
                    self.insert(PathsToNested::FORCED_AMBIGUITY);
                }
            }
        }

        self
    }

    #[must_use]
    fn extend_with_paths(self, path: PathsToNested) -> Self {
        let mut new = PathsToNested::empty();
        for p in path.iter_paths() {
            new |= self.extend_with(p);
        }
        new
    }

    fn iter_paths(self) -> impl Iterator<Item = PathKind> {
        // Exhaustiveness check: this `let` stops compiling if a new `PathKind`
        // variant is added, so the list below has to be kept in sync.
        let (PathKind::Inductive
            | PathKind::Unknown
            | PathKind::Coinductive
            | PathKind::ForcedAmbiguity);
        [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive, PathKind::ForcedAmbiguity]
            .into_iter()
            .filter(move |&p| self.contains(p.into()))
    }
}
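
// Illustrative example, not part of the original source: extending a set of paths
// with an inductive step upgrades the "weaker" `EMPTY` path to `INDUCTIVE` while
// leaving already stronger paths untouched.
#[cfg(test)]
mod paths_to_nested_example {
    use super::{PathKind, PathsToNested};

    #[test]
    fn extend_with_upgrades_weaker_paths() {
        let paths = PathsToNested::EMPTY | PathsToNested::COINDUCTIVE;
        let extended = paths.extend_with(PathKind::Inductive);
        assert_eq!(extended, PathsToNested::INDUCTIVE | PathsToNested::COINDUCTIVE);
    }
}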

/// The nested goals of each stack entry and the path from the
/// stack entry to that nested goal.
///
/// They are used when checking whether reevaluating a global cache
/// entry would encounter a cycle or use a provisional cache entry given the
/// current search graph state. We need to disable the global cache
/// in this case as it could otherwise result in behavioral differences.
/// Cycles can impact behavior. The cycle ABA may have different final
/// results from the cycle BAB depending on the cycle root.
///
/// We only start tracking nested goals once we've either encountered
/// overflow or a solver cycle. This is a performance optimization to
/// avoid tracking nested goals on the happy path.
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
    nested_goals: HashMap<X::Input, PathsToNested>,
}
impl<X: Cx> NestedGoals<X> {
    fn is_empty(&self) -> bool {
        self.nested_goals.is_empty()
    }

    fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) {
        match self.nested_goals.entry(input) {
            Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested,
            Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)),
        }
    }

    /// Adds the nested goals of a nested goal, given that the path from this goal
    /// to that nested goal is `step_kind`.
    ///
    /// If the path from this goal to the nested goal is inductive, the paths from this goal
    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
    /// the same as for the child.
    fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
        // Each nested goal is updated independently, and `insert` only unions paths for that
        // goal, so traversal order cannot affect the result.
        #[allow(rustc::potential_query_instability)]
        for (input, paths_to_nested) in nested_goals.iter() {
            let paths_to_nested = paths_to_nested.extend_with(step_kind);
            self.insert(input, paths_to_nested);
        }
    }

    // This helper intentionally exposes unstable hash iteration so each caller must opt in
    // locally and justify why its traversal is order-insensitive.
    #[cfg_attr(feature = "nightly", rustc_lint_query_instability)]
    #[allow(rustc::potential_query_instability)]
    fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ {
        self.nested_goals.iter().map(|(i, p)| (*i, *p))
    }

    fn contains(&self, input: X::Input) -> bool {
        self.nested_goals.contains_key(&input)
    }
}

/// A provisional result of an already computed goal which depends on other
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
struct ProvisionalCacheEntry<X: Cx> {
    /// Whether evaluating the goal encountered overflow. This is used to
    /// disable the cache entry except if the last goal on the stack is
    /// already involved in this cycle.
    encountered_overflow: bool,
    /// All cycle heads this cache entry depends on.
    heads: CycleHeads,
    /// The path from the highest cycle head to this goal. This differs from
    /// `heads` which tracks the path to the cycle head *from* this goal.
    path_from_head: PathKind,
    result: X::Result,
}

/// The final result of evaluating a goal.
///
/// We reset `encountered_overflow` when reevaluating a goal,
/// but need to track whether we've hit the recursion limit at
/// all for correctness.
///
/// We previously simply returned the final `StackEntry`, but this
/// made it easy to accidentally drop information from the previous
/// evaluation.
#[derive_where(Debug; X: Cx)]
struct EvaluationResult<X: Cx> {
    encountered_overflow: bool,
    required_depth: usize,
    heads: CycleHeads,
    nested_goals: NestedGoals<X>,
    result: X::Result,
}

impl<X: Cx> EvaluationResult<X> {
    fn finalize(
        final_entry: StackEntry<X>,
        encountered_overflow: bool,
        result: X::Result,
    ) -> EvaluationResult<X> {
        EvaluationResult {
            encountered_overflow,
            // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
            // and `nested_goals` between evaluations.
            required_depth: final_entry.required_depth,
            heads: final_entry.heads,
            nested_goals: final_entry.nested_goals,
            // We only care about the final result.
            result,
        }
    }
}

pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
    root_depth: AvailableDepth,
    stack: Stack<X>,
    /// The provisional cache contains entries for already computed goals which
    /// still depend on goals higher up in the stack. We don't move them to the
    /// global cache and track them locally instead. A provisional cache entry
    /// is only valid until the result of one of its cycle heads changes.
    provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,

    _marker: PhantomData<D>,
}

/// While [`SearchGraph::update_parent_goal`] can be mostly shared between
/// ordinary nested goals/global cache hits and provisional cache hits,
/// using the provisional cache should not add any nested goals.
///
/// `nested_goals` are only used when checking whether global cache entries
/// are applicable. This only cares about whether a goal is actually accessed.
/// Given that the usage of the provisional cache is fully deterministic, we
/// don't need to track the nested goals used while computing a provisional
/// cache entry.
enum UpdateParentGoalCtxt<'a, X: Cx> {
    Ordinary(&'a NestedGoals<X>),
    CycleOnStack(X::Input),
    ProvisionalCacheHit,
}

impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
    pub fn new(root_depth: usize) -> SearchGraph<D> {
        Self {
            root_depth: AvailableDepth(root_depth),
            stack: Default::default(),
            provisional_cache: Default::default(),
            _marker: PhantomData,
        }
    }

    /// Lazily update the stack entry for the parent goal.
    /// This behavior is shared between actually evaluating goals
    /// and using existing global cache entries to make sure they
    /// have the same impact on the remaining evaluation.
    fn update_parent_goal(
        stack: &mut Stack<X>,
        step_kind_from_parent: PathKind,
        required_depth_for_nested: usize,
        heads: impl Iterator<Item = (StackDepth, CycleHead)>,
        encountered_overflow: bool,
        context: UpdateParentGoalCtxt<'_, X>,
    ) {
        if let Some((parent_index, parent)) = stack.last_mut_with_index() {
            parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
            parent.encountered_overflow |= encountered_overflow;

            for (head_index, head) in heads {
                if let Some(candidate_usages) = &mut parent.candidate_usages {
                    candidate_usages
                        .usages
                        .get_or_insert_default()
                        .entry(head_index)
                        .or_default()
                        .add_usages_from_nested(head.usages);
                }
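                // A cycle head closer to the root of the stack than the parent
                // (`Ordering::Less`) is propagated into `parent.heads`, extending its
                // paths with the step from the parent to this nested goal. If the head
                // is the parent itself (`Ordering::Equal`), the parent is a cycle head
                // and only its own `usages` get updated; a head deeper than the parent
                // (`Ordering::Greater`) is impossible.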
                match head_index.cmp(&parent_index) {
                    Ordering::Less => parent.heads.insert(
                        head_index,
                        head.paths_to_head.extend_with(step_kind_from_parent),
                        head.usages,
                    ),
                    Ordering::Equal => {
                        parent.usages.get_or_insert_default().add_usages_from_nested(head.usages);
                    }
                    Ordering::Greater => unreachable!(),
                }
            }
            let parent_depends_on_cycle = match context {
                UpdateParentGoalCtxt::Ordinary(nested_goals) => {
                    parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals);
                    !nested_goals.is_empty()
                }
                UpdateParentGoalCtxt::CycleOnStack(head) => {
                    // We look up provisional cache entries before detecting cycles.
                    // We therefore can't use a global cache entry if it contains a cycle
                    // whose head is in the provisional cache.
                    parent.nested_goals.insert(head, step_kind_from_parent.into());
                    true
                }
                UpdateParentGoalCtxt::ProvisionalCacheHit => true,
            };
            // Once we've got goals which encountered overflow or a cycle,
            // we track all goals whose behavior may depend on these
            // goals as this change may cause them to now depend on additional
            // goals, resulting in new cycles. See the dev-guide for examples.
            if parent_depends_on_cycle {
                parent.nested_goals.insert(parent.input, PathsToNested::EMPTY);
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        if self.stack.is_empty() {
            debug_assert!(self.provisional_cache.is_empty());
            true
        } else {
            false
        }
    }

    /// The number of goals currently in the search graph. This should only be
    /// used for debugging purposes.
    pub fn debug_current_depth(&self) -> usize {
        self.stack.len()
    }

    /// Whether the path from `head` to the current stack entry is inductive or coinductive.
    ///
    /// The `step_kind_to_head` is used to add a single additional path segment to the path on
    /// the stack which completes the cycle. E.g. given an inductive step AB which then cycles
    /// coinductively with A, we need to treat this cycle as coinductive.
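    ///
    /// Concretely, this folds the step kinds of all stack entries between `head` and
    /// the current goal together with `step_kind_to_head`, so the returned kind
    /// reflects the entire cycle rather than only its final step.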
    fn cycle_path_kind(
        stack: &Stack<X>,
        step_kind_to_head: PathKind,
        head: StackDepth,
    ) -> PathKind {
        stack.cycle_step_kinds(head).fold(step_kind_to_head, |curr, step| curr.extend(step))
    }

    pub fn enter_single_candidate(&mut self) {
        let prev = self.stack.last_mut().unwrap().candidate_usages.replace(Default::default());
        debug_assert!(prev.is_none(), "existing candidate_usages: {prev:?}");
    }

    pub fn finish_single_candidate(&mut self) -> CandidateHeadUsages {
        self.stack.last_mut().unwrap().candidate_usages.take().unwrap()
    }
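
    // The evaluation of a single candidate is bracketed by `enter_single_candidate`
    // and `finish_single_candidate`. The returned `CandidateHeadUsages` track which
    // cycle heads were used by that candidate and can later be passed to
    // `ignore_candidate_head_usages`, e.g. if the candidate ends up being discarded.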
    pub fn ignore_candidate_head_usages(&mut self, usages: CandidateHeadUsages) {
        if let Some(usages) = usages.usages {
            let (entry_index, entry) = self.stack.last_mut_with_index().unwrap();
            // Ignoring usages only mutates the state for the current `head_index`, so the
            // resulting per-head state is unchanged by iteration order.
            #[allow(rustc::potential_query_instability)]
            for (head_index, usages) in usages.into_iter() {
                if head_index == entry_index {
                    entry.usages.as_mut().unwrap().ignore_usages(usages);
                } else {
                    entry.heads.ignore_usages(head_index, usages);
                }
            }
        }
    }

    pub fn evaluate_root_goal_for_proof_tree(
        cx: X,
        root_depth: usize,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let mut this = SearchGraph::<D>::new(root_depth);
        let available_depth = AvailableDepth(root_depth);
        let step_kind_from_parent = PathKind::Inductive; // is never used
        this.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });
        let evaluation_result = this.evaluate_goal_in_task(cx, input, inspect);
        evaluation_result.result
    }

    /// Probably the most involved method of the whole solver.
    ///
    /// While goals get computed via `D::compute_goal`, this function handles
    /// caching, overflow, and cycles.
    #[instrument(level = "debug", skip(self, cx, inspect), ret)]
    pub fn evaluate_goal(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        inspect: &mut D::ProofTreeBuilder,
    ) -> X::Result {
        let Some(available_depth) =
            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
        else {
            return self.handle_overflow(cx, input);
        };

        // We check the provisional cache before checking the global cache. This simplifies
        // the implementation as we can avoid worrying about cases where both the global and
        // provisional cache may apply, e.g. consider the following example
        //
        // - xxBA overflow
        // - A
        //   - BA cycle
        //   - CB :x:
        if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) {
            return result;
        }

        // Look up the global cache unless we're building proof trees or are currently
        // fuzzing.
        let validate_cache = if !D::inspect_is_noop(inspect) {
            None
        } else if let Some(scope) = D::enter_validation_scope(cx, input) {
            // When validating the global cache we need to track the goals for which the
            // global cache has been disabled as it may otherwise change the result for
            // cyclic goals. We don't care about goals which are not on the current stack
            // so it's fine to drop their scope eagerly.
            self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth)
                .inspect(|expected| debug!(?expected, "validate cache entry"))
                .map(|r| (scope, r))
        } else if let Some(result) =
            self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth)
        {
            return result;
        } else {
            None
        };

        // Detect cycles on the stack. We do this after the global cache lookup to
        // avoid iterating over the stack in case a goal has already been computed.
        // This may not have an actual performance impact and we could reorder them
        // as it may reduce the number of `nested_goals` we need to track.
        if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) {
            debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}");
            return result;
        }

        // Unfortunately, it looks like we actually have to compute this goal.
        self.stack.push(StackEntry {
            input,
            step_kind_from_parent,
            available_depth,
            provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            usages: None,
            candidate_usages: None,
            nested_goals: Default::default(),
        });

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_cached_task` closure. If computing this goal depends on something
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycle participants.
        let (evaluation_result, dep_node) =
            cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect));

        // We've finished computing the goal and have popped it from the stack,
        // lazily update its parent goal.
        Self::update_parent_goal(
            &mut self.stack,
            step_kind_from_parent,
            evaluation_result.required_depth,
            evaluation_result.heads.iter(),
            evaluation_result.encountered_overflow,
            UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
        );
        let result = evaluation_result.result;

        // We're now done with this goal. We only add the root of cycles to the global cache.
        // In case this goal is involved in a larger cycle add it to the provisional cache.
        if evaluation_result.heads.is_empty() {
            if let Some((_scope, expected)) = validate_cache {
                // Do not try to move a goal into the cache again if we're testing
                // the global cache.
                assert_eq!(expected, evaluation_result.result, "input={input:?}");
            } else if D::inspect_is_noop(inspect) {
                self.insert_global_cache(cx, input, evaluation_result, dep_node)
            }
        } else if D::ENABLE_PROVISIONAL_CACHE {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
            let entry = self.provisional_cache.entry(input).or_default();
            let EvaluationResult {
                encountered_overflow,
                required_depth: _,
                heads,
                nested_goals: _,
                result,
            } = evaluation_result;
            let path_from_head = Self::cycle_path_kind(
                &self.stack,
                step_kind_from_parent,
                heads.highest_cycle_head_index(),
            );
            let provisional_cache_entry =
                ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result };
            debug!(?provisional_cache_entry);
            entry.push(provisional_cache_entry);
        } else {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
        }

        result
    }

    fn handle_overflow(&mut self, cx: X, input: X::Input) -> X::Result {
        if let Some(last) = self.stack.last_mut() {
            last.encountered_overflow = true;
            // If computing a goal `B` depends on another goal `A` and
            // `A` has a nested goal which overflows, then computing `B`
            // at the same depth, but with `A` already on the stack,
            // would encounter a solver cycle instead, potentially
            // changing the result.
            //
            // We must therefore not use the global cache entry for `B` in that case.
            // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs
            last.nested_goals.insert(last.input, PathsToNested::EMPTY);
        }
};debug!("encountered stack overflow");
902 D::stack_overflow_result(cx, input)
903 }

    /// When reevaluating a goal with a changed provisional result, all provisional cache
    /// entries which depend on this goal get invalidated.
    ///
    /// Note that we keep provisional cache entries which accessed this goal as a cycle head, but
    /// don't depend on its value.
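    ///
    /// Concretely, an entry is dropped iff its highest cycle head is the goal about
    /// to be rerun and its usages of that head are non-empty.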
    fn clear_dependent_provisional_results_for_rerun(&mut self) {
        let rerun_index = self.stack.next_index();
        // Each cached entry is filtered independently based on whether it depends on
        // `rerun_index`, so bucket traversal order does not matter.
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|_, entries| {
            entries.retain(|entry| {
                let (head_index, head) = entry.heads.highest_cycle_head();
                head_index != rerun_index || head.usages.is_empty()
            });
            !entries.is_empty()
        });
    }
}

/// We need to rebase provisional cache entries when popping one of their cycle
/// heads from the stack. This may not necessarily mean that we've actually
/// reached a fixpoint for that cycle head, which impacts the way we rebase
/// provisional cache entries.
#[derive_where(Debug; X: Cx)]
enum RebaseReason<X: Cx> {
    NoCycleUsages,
    Ambiguity(X::AmbiguityInfo),
    Overflow,
    /// We've actually reached a fixpoint.
    ///
    /// This can happen in the first evaluation step for the cycle head. In this
    /// case the used provisional result depends on the cycle `PathKind`.
    /// We store this path kind to check whether the provisional cache entry
    /// we're rebasing relied on the same cycles.
    ///
    /// In later iterations cycles always return `stack_entry.provisional_result`,
    /// so we no longer depend on the `PathKind`. We store `None` in that case.
    ReachedFixpoint(Option<PathKind>),
}

impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D, X> {
    /// A necessary optimization to handle complex solver cycles. A provisional cache entry
    /// relies on a set of cycle heads and the path towards these heads. When popping a cycle
    /// head from the stack after we've finished computing it, we can't be sure that the
    /// provisional cache entry is still applicable. We need to keep the cache entries to
    /// prevent hangs.
    ///
    /// This can be thought of as pretending to reevaluate the popped head as a nested goal
    /// of this provisional result. For this to be correct, all cycles encountered while
    /// we'd reevaluate the cycle head as a nested goal must keep the same cycle kind. See the
    /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
    ///
    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
    /// its provisional result is invalid. Actually discarding provisional cache entries in
    /// this case would cause hangs, so we instead change the result of dependent provisional
    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
    /// to me.
    #[instrument(level = "trace", skip(self, cx))]
    fn rebase_provisional_cache_entries(
        &mut self,
        cx: X,
        stack_entry: &StackEntry<X>,
        rebase_reason: RebaseReason<X>,
    ) {
        let popped_head_index = self.stack.next_index();
        // Rebasing decisions depend only on each provisional entry and the current stack state,
        // so traversing the cache in hash order cannot change the final cache contents.
        #[allow(rustc::potential_query_instability)]
        self.provisional_cache.retain(|&input, entries| {
            entries.retain_mut(|entry| {
                let ProvisionalCacheEntry {
                    encountered_overflow: _,
                    heads,
                    path_from_head,
                    result,
                } = entry;
                let popped_head = if heads.highest_cycle_head_index() == popped_head_index {
                    heads.remove_highest_cycle_head()
                } else {
                    debug_assert!(heads.highest_cycle_head_index() < popped_head_index);
                    return true;
                };

                // We're rebasing an entry `e` over a head `p`. This head
                // has a number of own heads `h` it depends on.
                //
                // This causes our provisional result to depend on the heads
                // of `p` to avoid moving any goal which uses this cache entry to
                // the global cache.
                if popped_head.usages.is_empty() {
                    // The result of `e` does not depend on the value of `p`. Thus we can
                    // keep using the result of this provisional cache entry even if evaluating
                    // `p` as a nested goal of `e` would have a different result.
                    for (head_index, _) in stack_entry.heads.iter() {
                        heads.insert(head_index, PathsToNested::EMPTY, HeadUsages::default());
                    }
                } else {
                    // The entry `e` actually depends on the value of `p`. We need
                    // to make sure that the value of `p` wouldn't change even if we
                    // were to reevaluate it as a nested goal of `e` instead. For this
                    // we check that the path kind of all paths `hph` remains the
                    // same after rebasing.
                    //
                    // After rebasing, the cycles `hph` will go through `e`. We need to make
                    // sure that for all possible paths `hep`, `heph` is equal to `hph`.
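                    //
                    // For example, if `hph` is coinductive but the rebased path `heph`
                    // would be inductive for some combination of `hep` and `ph`, the
                    // entry is dropped below instead of being rebased.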
                    let ep = popped_head.paths_to_head;
                    for (head_index, head) in stack_entry.heads.iter() {
                        let ph = head.paths_to_head;
                        let hp = Self::cycle_path_kind(
                            &self.stack,
                            stack_entry.step_kind_from_parent,
                            head_index,
                        );
                        // We first validate that all cycles while computing `p` would stay
                        // the same if we were to recompute it as a nested goal of `e`.
                        let he = hp.extend(*path_from_head);
                        for ph in ph.iter_paths() {
                            let hph = hp.extend(ph);
                            for ep in ep.iter_paths() {
                                let hep = ep.extend(he);
                                let heph = hep.extend(ph);
                                if hph != heph {
                                    return false;
                                }
                            }
                        }

                        // If so, all paths reached while computing `p` have to get added
                        // to the heads of `e` to make sure that rebasing `e` again also
                        // considers them.
                        let eph = ep.extend_with_paths(ph);
                        heads.insert(head_index, eph, head.usages);
                    }

                    // The provisional cache entry does depend on the provisional result
                    // of the popped cycle head. We need to mutate the result of our
                    // provisional cache entry in case we did not reach a fixpoint.
                    match rebase_reason {
                        // If the cycle head does not actually depend on itself, then
                        // the provisional result used by the provisional cache entry
                        // is not actually equal to the final provisional result. We
                        // need to discard the provisional cache entry in this case.
                        RebaseReason::NoCycleUsages => return false,
                        RebaseReason::Ambiguity(info) => {
                            *result = D::propagate_ambiguity(cx, input, info);
                        }
                        RebaseReason::Overflow => *result = D::fixpoint_overflow_result(cx, input),
                        RebaseReason::ReachedFixpoint(None) => {}
                        RebaseReason::ReachedFixpoint(Some(path_kind)) => {
                            if !popped_head.usages.is_single(path_kind) {
                                return false;
                            }
                        }
                    };
                }

                let Some(new_highest_head_index) = heads.opt_highest_cycle_head_index() else {
                    return false;
                };

                // We now care about the path from the next highest cycle head to the
                // provisional cache entry.
                *path_from_head = path_from_head.extend(Self::cycle_path_kind(
                    &self.stack,
                    stack_entry.step_kind_from_parent,
                    new_highest_head_index,
                ));

                trace!(?input, ?entry, "rebased provisional cache entry");

                true
            });
            !entries.is_empty()
        });
    }

    fn lookup_provisional_cache(
        &mut self,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        if !D::ENABLE_PROVISIONAL_CACHE {
            return None;
        }

        let entries = self.provisional_cache.get(&input)?;
        for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in
            entries
        {
            let head_index = heads.highest_cycle_head_index();
            if encountered_overflow {
                // This check is overly strict and very subtle. We need to make sure that if
                // a global cache entry depends on some goal without adding it to its
                // `nested_goals`, that goal must never have an applicable provisional
                // cache entry to avoid incorrectly applying the cache entry.
                //
                // As we'd have to otherwise track literally all nested goals, we only
                // apply provisional cache entries which encountered overflow once the
                // current goal is already part of the same cycle. This check could be
                // improved but seems to be good enough for now.
                let last = self.stack.last().unwrap();
                if last.heads.opt_lowest_cycle_head_index().is_none_or(|lowest| lowest > head_index)
                {
                    continue;
                }
            }

            // A provisional cache entry is only valid if the current path from its
            // highest cycle head to the goal is the same.
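            // E.g. an entry computed while the path from its highest cycle head was
            // coinductive must not be used if the goal is now reached from that head
            // via an inductive path, as the provisional result used for the cycle
            // may differ between the two path kinds.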
            if path_from_head
                == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index)
            {
                Self::update_parent_goal(
                    &mut self.stack,
                    step_kind_from_parent,
                    0,
                    heads.iter(),
                    encountered_overflow,
                    UpdateParentGoalCtxt::ProvisionalCacheHit,
                );
                debug!(?head_index, ?path_from_head, "provisional cache hit");
                return Some(result);
            }
        }

        None
    }

    /// Even if there is a global cache entry for a given goal, we need to make sure
    /// evaluating this entry would not have ended up depending on either a goal
    /// already on the stack or a provisional cache entry.
    fn candidate_is_applicable(
        &self,
        step_kind_from_parent: PathKind,
        nested_goals: &NestedGoals<X>,
    ) -> bool {
        // If the global cache entry didn't depend on any nested goals, it always
        // applies.
        if nested_goals.is_empty() {
            return true;
        }

        // If a nested goal of the global cache entry is on the stack, we would
        // definitely encounter a cycle.
        if self.stack.iter().any(|e| nested_goals.contains(e.input)) {
};debug!("cache entry not applicable due to stack");
1153return false;
1154 }
11551156// The global cache entry is also invalid if there's a provisional cache entry
1157 // would apply for any of its nested goals.
1158 // Any matching provisional entry rejects the candidate,
1159 // so iteration order only affects when we return `false`, not the final answer.
1160#[allow(rustc::potential_query_instability)]
1161for (input, path_from_global_entry) in nested_goals.iter() {
1162let Some(entries) = self.provisional_cache.get(&input) else {
1163continue;
1164 };

            debug!(?input, ?path_from_global_entry, ?entries, "candidate_is_applicable");
            // A provisional cache entry is applicable if the path to
            // its highest cycle head is equal to the expected path.
            for &ProvisionalCacheEntry {
                encountered_overflow,
                ref heads,
                path_from_head: head_to_provisional,
                result: _,
            } in entries.iter()
            {
                // We don't have to worry about provisional cache entries which encountered
                // overflow, see the relevant comment in `lookup_provisional_cache`.
                if encountered_overflow {
                    continue;
                }

                // A provisional cache entry only applies if the path from its highest head
                // matches the path when encountering the goal.
                //
                // We check if any of the paths taken while computing the global goal
                // would end up with an applicable provisional cache entry.
                let head_index = heads.highest_cycle_head_index();
                let head_to_curr =
                    Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
                let full_paths = path_from_global_entry.extend_with(head_to_curr);
                if full_paths.contains(head_to_provisional.into()) {
                    debug!(
                        ?full_paths,
                        ?head_to_provisional,
                        "cache entry not applicable due to matching paths"
                    );
                    return false;
                }
            }
        }

        true
    }

    /// Used when fuzzing the global cache. Accesses the global cache without
    /// updating the state of the search graph.
    fn lookup_global_cache_untracked(
        &self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            cache
                .get(cx, input, available_depth, |nested_goals| {
                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
                })
                .map(|c| c.result)
        })
    }

    /// Try to fetch a previously computed result from the global cache,
    /// making sure to only do so if it would match the result of reevaluating
    /// this goal.
    fn lookup_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
        available_depth: AvailableDepth,
    ) -> Option<X::Result> {
        cx.with_global_cache(|cache| {
            let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache
                .get(cx, input, available_depth, |nested_goals| {
                    self.candidate_is_applicable(step_kind_from_parent, nested_goals)
                })?;

            // We don't move cycle participants to the global cache, so the
            // cycle heads are always empty.
            let heads = iter::empty();
            Self::update_parent_goal(
                &mut self.stack,
                step_kind_from_parent,
                required_depth,
                heads,
                encountered_overflow,
                UpdateParentGoalCtxt::Ordinary(nested_goals),
            );

            debug!(?required_depth, "global cache hit");
            Some(result)
        })
    }

    fn check_cycle_on_stack(
        &mut self,
        cx: X,
        input: X::Input,
        step_kind_from_parent: PathKind,
    ) -> Option<X::Result> {
        let head_index = self.stack.find(input)?;
        // We have a nested goal which directly relies on a goal deeper in the stack.
        //
        // We start by tagging all cycle participants, as that's necessary for caching.
        //
        // Finally we can return either the provisional response or the initial response
        // in case we're in the first fixpoint iteration for this goal.
        let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head_index);
        debug!(?path_kind, "encountered cycle with depth {head_index:?}");
        let mut usages = HeadUsages::default();
        usages.add_usage(path_kind);
        let head = CycleHead { paths_to_head: step_kind_from_parent.into(), usages };
        Self::update_parent_goal(
            &mut self.stack,
            step_kind_from_parent,
            0,
            iter::once((head_index, head)),
            false,
            UpdateParentGoalCtxt::CycleOnStack(input),
        );

        // Return the provisional result or, if we're in the first iteration,
        // start with no constraints.
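        // E.g. the first time the cycle `A -> B -> A` is encountered, `A` has no
        // `provisional_result` yet, so the nested `A` starts out with the initial
        // provisional result for this cycle's path kind. In later fixpoint iterations
        // we return the provisional result of the previous iteration instead.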
        if let Some(result) = self.stack[head_index].provisional_result {
            Some(result)
        } else {
            Some(D::initial_provisional_result(cx, path_kind, input))
        }
    }

    /// Whether we've reached a fixpoint when evaluating a cycle head.
    #[instrument(level = "trace", skip(self, stack_entry), ret)]
    fn reached_fixpoint(
        &mut self,
        stack_entry: &StackEntry<X>,
        usages: HeadUsages,
        result: X::Result,
    ) -> Result<Option<PathKind>, ()> {
        let provisional_result = stack_entry.provisional_result;
        if let Some(provisional_result) = provisional_result {
            if provisional_result == result { Ok(None) } else { Err(()) }
        } else if let Some(path_kind) = D::is_initial_provisional_result(result)
            .filter(|&path_kind| usages.is_single(path_kind))
        {
            Ok(Some(path_kind))
        } else {
            Err(())
        }
    }

    /// When we encounter a coinductive cycle, we have to fetch the
    /// result of that cycle while we are still computing it. Because
    /// of this we continuously recompute the cycle until the result
    /// of the previous iteration is equal to the final result, at which
    /// point we are done.
    fn evaluate_goal_in_task(
        &mut self,
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
    ) -> EvaluationResult<X> {
        // We reset `encountered_overflow` each time we rerun this goal
        // but need to make sure we correctly propagate it to the global
        // cache even if only some of the evaluations actually reach the
        // recursion limit.
        let mut encountered_overflow = false;
        let mut i = 0;
        loop {
            let result = D::compute_goal(self, cx, input, inspect);
            let stack_entry = self.stack.pop();
            encountered_overflow |= stack_entry.encountered_overflow;
            debug_assert_eq!(stack_entry.input, input);

            // If the current goal is not a cycle head, we are done.
            //
            // There are no provisional cache entries which depend on this goal.
            let Some(usages) = stack_entry.usages else {
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If it is a cycle head, we have to keep trying to prove it until
            // we reach a fixpoint. We need to do so for all cycle heads,
            // not only for the root.
            //
            // See tests/ui/traits/next-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
            // for an example.
            //
            // Check whether we reached a fixpoint, either because the final result
            // is equal to the provisional result of the previous iteration, or because
            // this was only the head of either coinductive or inductive cycles, and the
            // final result is equal to the initial response for that case.
            if let Ok(fixpoint) = self.reached_fixpoint(&stack_entry, usages, result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::ReachedFixpoint(fixpoint),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            } else if usages.is_empty() {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::NoCycleUsages,
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // If computing this goal results in ambiguity with no constraints,
            // we do not rerun it. It's incredibly difficult to get a different
            // response in the next iteration in this case. These changes would
            // likely either be caused by incompleteness or can change the maybe
            // cause from ambiguity to overflow. Returning ambiguity always
            // preserves soundness and completeness even if the goal is known
            // to succeed or fail.
            //
            // This prevents exponential blowup affecting multiple major crates.
            // As we only get to this branch if we haven't yet reached a fixpoint,
            // we also taint all provisional cache entries which depend on the
            // current goal.
            if let Some(info) = D::is_ambiguous_result(result) {
                self.rebase_provisional_cache_entries(
                    cx,
                    &stack_entry,
                    RebaseReason::Ambiguity(info),
                );
                return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If we've reached the fixpoint step limit, we bail with overflow and taint all
            // provisional cache entries which depend on the current goal.
            i += 1;
            if i >= D::FIXPOINT_STEP_LIMIT {
};debug!("canonical cycle overflow");
1395let result = D::fixpoint_overflow_result(cx, input);
1396self.rebase_provisional_cache_entries(cx, &stack_entry, RebaseReason::Overflow);
1397return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
1398 }
13991400// Clear all provisional cache entries which depend on a previous provisional
1401 // result of this goal and rerun. This does not remove goals which accessed this
1402 // goal without depending on its result.
1403self.clear_dependent_provisional_results_for_rerun();

            debug!(?result, "fixpoint changed provisional results");
            self.stack.push(StackEntry {
                input,
                step_kind_from_parent: stack_entry.step_kind_from_parent,
                available_depth: stack_entry.available_depth,
                provisional_result: Some(result),
                // We can keep these goals from previous iterations as they are only
                // ever read after finalizing this evaluation.
                required_depth: stack_entry.required_depth,
                heads: stack_entry.heads,
                nested_goals: stack_entry.nested_goals,
                // We reset these two fields when rerunning this goal. We could
                // keep `encountered_overflow` as it's only used as a performance
                // optimization. However, given that the proof tree will likely look
                // similar to the previous iterations when reevaluating, it's better
                // for caching if the reevaluation also starts out with `false`.
                encountered_overflow: false,
                // We keep provisional cache entries around if they used this goal
                // without depending on its result.
                //
                // We still need to drop or rebase these cache entries once we've
                // finished evaluating this goal.
                usages: Some(HeadUsages::default()),
                candidate_usages: None,
            });
        }
    }

    /// When encountering a cycle, both inductive and coinductive, we only
    /// move the root into the global cache. We also store all other cycle
    /// participants involved.
    ///
    /// We must not use the global cache entry of a root goal if a cycle
    /// participant is on the stack. This is necessary to prevent unstable
    /// results. See the comment of `StackEntry::nested_goals` for
    /// more details.
    fn insert_global_cache(
        &mut self,
        cx: X,
        input: X::Input,
        evaluation_result: EvaluationResult<X>,
        dep_node: X::DepNodeIndex,
    ) {
        debug!(?evaluation_result, "insert global cache");
        cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
    }
}