miri/borrow_tracker/tree_borrows/tree.rs
1//! In this file we handle the "Tree" part of Tree Borrows, i.e. all tree
2//! traversal functions, optimizations to trim branches, and keeping track of
3//! the relative position of the access to each node being updated. This of course
4//! also includes the definition of the tree structure.
5//!
6//! Functions here manipulate permissions but are oblivious to them: as
7//! the internals of `Permission` are private, the update process is a black
8//! box. All we need to know here are
9//! - the fact that updates depend only on the old state, the status of protectors,
10//! and the relative position of the access;
11//! - idempotency properties asserted in `perms.rs` (for optimizations)
12
13use std::ops::Range;
14use std::{cmp, fmt, mem};
15
16use rustc_abi::Size;
17use rustc_data_structures::fx::FxHashSet;
18use rustc_span::Span;
19use smallvec::SmallVec;
20
21use super::diagnostics::{
22 AccessCause, DiagnosticInfo, NodeDebugInfo, TbError, TransitionError,
23 no_valid_exposed_references_error,
24};
25use super::foreign_access_skipping::IdempotentForeignAccess;
26use super::perms::{PermTransition, Permission};
27use super::tree_visitor::{ChildrenVisitMode, ContinueTraversal, NodeAppArgs, TreeVisitor};
28use super::unimap::{UniIndex, UniKeyMap, UniValMap};
29use super::wildcard::ExposedCache;
30use crate::borrow_tracker::{AccessKind, GlobalState, ProtectorKind};
31use crate::*;
32
33mod tests;
34
/// Data for a reference at a single *location*.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct LocationState {
    /// A location is "accessed" when it is child-accessed for the first time (and the initial
    /// retag initializes the location for the range covered by the type), and it then stays
    /// accessed forever.
    /// For accessed locations, "permission" is the current permission. However, for
    /// non-accessed locations, we still need to track the "future initial permission": this will
    /// start out to be `default_initial_perm`, but foreign accesses need to be taken into account.
    /// Crucially however, while transitions to `Disabled` would usually be UB if this location is
    /// protected, that is *not* the case for non-accessed locations. Instead we just have a latent
    /// "future initial permission" of `Disabled`, causing UB only if an access is ever actually
    /// performed.
    /// Note that the tree root is also always accessed, as if the allocation was a write access.
    accessed: bool,
    /// This pointer's current permission / future initial permission.
    permission: Permission,
    /// See `foreign_access_skipping.rs`.
    /// Stores an idempotent foreign access for this location and its children.
    /// For correctness, this must not be too strong, and the recorded idempotent foreign access
    /// of all children must be at least as strong as this. For performance, it should be as strong as possible.
    idempotent_foreign_access: IdempotentForeignAccess,
}
58
impl LocationState {
    /// Constructs a new initial state. It has neither been accessed, nor been subjected
    /// to any foreign access yet.
    /// The permission is not allowed to be `Unique`.
    /// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`.
    pub fn new_non_accessed(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
        assert!(permission.is_initial() || permission.is_disabled());
        assert!(!permission.is_unique());
        Self { permission, accessed: false, idempotent_foreign_access: sifa }
    }

    /// Constructs a new initial state. It has not yet been subjected
    /// to any foreign access. However, it is already marked as having been accessed.
    /// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs`.
    pub fn new_accessed(permission: Permission, sifa: IdempotentForeignAccess) -> Self {
        Self { permission, accessed: true, idempotent_foreign_access: sifa }
    }

    /// Checks whether the current location state is ever reachable in a real execution.
    pub fn possible(&self) -> bool {
        // `Unique` can only be reached on actually accessed locations.
        self.accessed || !self.permission.is_unique()
    }

    /// Check if the location has been accessed, i.e. if it has
    /// ever been accessed through a child pointer.
    pub fn accessed(&self) -> bool {
        self.accessed
    }

    /// The current permission (for accessed locations), or the "future initial
    /// permission" (for non-accessed ones).
    pub fn permission(&self) -> Permission {
        self.permission
    }

    /// Performs an access on this index and updates node,
    /// perm and wildcard_state to reflect the transition.
    ///
    /// Returns `Err` if the transition is forbidden (the error is later turned into
    /// proper UB diagnostics by the caller).
    fn perform_transition(
        &mut self,
        idx: UniIndex,
        nodes: &mut UniValMap<Node>,
        exposed_cache: &mut ExposedCache,
        access_kind: AccessKind,
        relatedness: AccessRelatedness,
        protected: bool,
        diagnostics: &DiagnosticInfo,
    ) -> Result<(), TransitionError> {
        // Call this function now (i.e. only if we know `relatedness`), which
        // ensures it is only called when `skip_if_known_noop` returns
        // `Recurse`, due to the contract of `traverse_this_parents_children_other`.
        self.record_new_access(access_kind, relatedness);
        // Remember the pre-transition access level so we can tell below whether
        // the exposure cache needs to be updated.
        let old_access_level = self.permission.strongest_allowed_local_access(protected);
        let transition = self.perform_access(access_kind, relatedness, protected)?;
        if !transition.is_noop() {
            let node = nodes.get_mut(idx).unwrap();
            // Record the event as part of the history.
            node.debug_info
                .history
                .push(diagnostics.create_event(transition, relatedness.is_foreign()));

            // We need to update the wildcard state, if the permission
            // of an exposed pointer changes.
            if node.is_exposed {
                let access_level = self.permission.strongest_allowed_local_access(protected);
                exposed_cache.update_exposure(nodes, idx, old_access_level, access_level);
            }
        }
        Ok(())
    }

    /// Apply the effect of an access to one location, including
    /// - applying `Permission::perform_access` to the inner `Permission`,
    /// - emitting protector UB if the location is accessed,
    /// - updating the accessed status (child accesses produce accessed locations).
    fn perform_access(
        &mut self,
        access_kind: AccessKind,
        rel_pos: AccessRelatedness,
        protected: bool,
    ) -> Result<PermTransition, TransitionError> {
        let old_perm = self.permission;
        // `None` from `Permission::perform_access` means the access is forbidden outright.
        let transition = Permission::perform_access(access_kind, rel_pos, old_perm, protected)
            .ok_or(TransitionError::ChildAccessForbidden(old_perm))?;
        // A child (non-foreign) access marks this location as accessed, forever.
        self.accessed |= !rel_pos.is_foreign();
        self.permission = transition.applied(old_perm).unwrap();
        // Why do only accessed locations cause protector errors?
        // Consider two mutable references `x`, `y` into disjoint parts of
        // the same allocation. A priori, these may actually both be used to
        // access the entire allocation, as long as only reads occur. However,
        // a write to `y` needs to somehow record that `x` can no longer be used
        // on that location at all. For these non-accessed locations (i.e., locations
        // that haven't been accessed with `x` yet), we track the "future initial state":
        // it defaults to whatever the initial state of the tag is,
        // but the access to `y` moves that "future initial state" of `x` to `Disabled`.
        // However, usually a `Reserved -> Disabled` transition would be UB due to the protector!
        // So clearly protectors shouldn't fire for such "future initial state" transitions.
        //
        // See the test `two_mut_protected_same_alloc` in `tests/pass/tree_borrows/tree-borrows.rs`
        // for an example of safe code that would be UB if we forgot to check `self.accessed`.
        if protected && self.accessed && transition.produces_disabled() {
            return Err(TransitionError::ProtectedDisabled(old_perm));
        }
        debug_assert!(self.possible());
        Ok(transition)
    }

    /// Like `perform_access`, but ignores the concrete error cause and also uses state-passing
    /// rather than a mutable reference. As such, it returns `Some(x)` if the transition succeeded,
    /// or `None` if there was an error.
    #[cfg(test)]
    fn perform_access_no_fluff(
        mut self,
        access_kind: AccessKind,
        rel_pos: AccessRelatedness,
        protected: bool,
    ) -> Option<Self> {
        match self.perform_access(access_kind, rel_pos, protected) {
            Ok(_) => Some(self),
            Err(_) => None,
        }
    }

    /// Tree traversal optimizations. See `foreign_access_skipping.rs`.
    /// This checks if such a foreign access can be skipped.
    fn skip_if_known_noop(
        &self,
        access_kind: AccessKind,
        rel_pos: AccessRelatedness,
    ) -> ContinueTraversal {
        if rel_pos.is_foreign() {
            let happening_now = IdempotentForeignAccess::from_foreign(access_kind);
            let mut new_access_noop =
                self.idempotent_foreign_access.can_skip_foreign_access(happening_now);
            if self.permission.is_disabled() {
                // A foreign access to a `Disabled` tag will have almost no observable effect.
                // It's a theorem that `Disabled` node have no protected accessed children,
                // and so this foreign access will never trigger any protector.
                // (Intuition: You're either protected accessed, and thus can't become Disabled
                // or you're already Disabled protected, but not accessed, and then can't
                // become accessed since that requires a child access, which Disabled blocks.)
                // Further, the children will never be able to read or write again, since they
                // have a `Disabled` parent. So this only affects diagnostics, such that the
                // blocking write will still be identified directly, just at a different tag.
                new_access_noop = true;
            }
            if self.permission.is_frozen() && access_kind == AccessKind::Read {
                // A foreign read to a `Frozen` tag will have almost no observable effect.
                // It's a theorem that `Frozen` nodes have no `Unique` children, so all children
                // already survive foreign reads. Foreign reads in general have almost no
                // effect, the only further thing they could do is make protected `Reserved`
                // nodes become conflicted, i.e. make them reject child writes for the further
                // duration of their protector. But such a child write is already rejected
                // because this node is frozen. So this only affects diagnostics, but the
                // blocking read will still be identified directly, just at a different tag.
                new_access_noop = true;
            }
            if new_access_noop {
                // Abort traversal if the new access is indeed guaranteed
                // to be noop.
                // No need to update `self.idempotent_foreign_access`,
                // the type of the current streak among nonempty read-only
                // or nonempty with at least one write has not changed.
                ContinueTraversal::SkipSelfAndChildren
            } else {
                // Otherwise propagate this time, and also record the
                // access that just occurred so that we can skip the propagation
                // next time.
                ContinueTraversal::Recurse
            }
        } else {
            // A child access occurred, this breaks the streak of foreign
            // accesses in a row and the sequence since the previous child access
            // is now empty.
            ContinueTraversal::Recurse
        }
    }

    /// Records a new access, so that future access can potentially be skipped
    /// by `skip_if_known_noop`. This must be called on child accesses, and otherwise
    /// should be called on foreign accesses for increased performance. It should not be called
    /// when `skip_if_known_noop` indicated skipping, since it then is a no-op.
    /// See `foreign_access_skipping.rs`.
    fn record_new_access(&mut self, access_kind: AccessKind, rel_pos: AccessRelatedness) {
        // Enforce the contract stated above: this must only run when the access
        // would not have been skipped.
        debug_assert!(matches!(
            self.skip_if_known_noop(access_kind, rel_pos),
            ContinueTraversal::Recurse
        ));
        self.idempotent_foreign_access
            .record_new(IdempotentForeignAccess::from_acc_and_rel(access_kind, rel_pos));
    }
}
249
250impl fmt::Display for LocationState {
251 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
252 write!(f, "{}", self.permission)?;
253 if !self.accessed {
254 write!(f, "?")?;
255 }
256 Ok(())
257 }
258}
/// The state of the full tree for a particular location: for all nodes, the local permissions
/// of that node, and the tracking for wildcard accesses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct LocationTree {
    /// Maps a tag to a perm, with possible lazy initialization.
    ///
    /// NOTE: not all tags registered in `Tree::nodes` are necessarily in all
    /// ranges of `perms`, because `perms` is in part lazily initialized.
    /// Just because `nodes.get(key)` is `Some(_)` does not mean you can safely
    /// `unwrap` any `perm.get(key)`.
    ///
    /// We do uphold the fact that `keys(perms)` is a subset of `keys(nodes)`.
    pub perms: UniValMap<LocationState>,
    /// Caches information about the relatedness of nodes for a wildcard access.
    pub exposed_cache: ExposedCache,
}
/// Tree structure with both parents and children since we want to be
/// able to traverse the tree efficiently in both directions.
#[derive(Clone, Debug)]
pub struct Tree {
    /// Mapping from tags to keys. The key obtained can then be used in
    /// any of the `UniValMap` relative to this allocation, i.e.
    /// `nodes`, `LocationTree::perms` and `LocationTree::exposed_cache`
    /// of the same `Tree`.
    /// The parent-child relationship in `Node` is encoded in terms of these same
    /// keys, so traversing the entire tree needs exactly one access to
    /// `tag_mapping`.
    pub(super) tag_mapping: UniKeyMap<BorTag>,
    /// All nodes of this tree.
    pub(super) nodes: UniValMap<Node>,
    /// Associates with each location its state and wildcard access tracking.
    pub(super) locations: DedupRangeMap<LocationTree>,
    /// Contains both the root of the main tree as well as the roots of the wildcard subtrees.
    ///
    /// If we reborrow a reference which has wildcard provenance, then we do not know where in
    /// the tree to attach them. Instead we create a new additional tree for this allocation
    /// with this new reference as a root. We call this additional tree a wildcard subtree.
    ///
    /// The actual structure should be a single tree but with wildcard provenance we approximate
    /// this with this ordered set of trees. Each wildcard subtree is the direct child of *some* exposed
    /// tag (that is smaller than the root), but we do not know which. This also means that it can only be the
    /// child of a tree that comes before it in the vec ensuring we don't have any cycles in our
    /// approximated tree.
    ///
    /// Sorted according to `BorTag` from low to high. This also means the main root is `root[0]`.
    ///
    /// Has array size 2 because that still ensures the minimum size for SmallVec.
    pub(super) roots: SmallVec<[UniIndex; 2]>,
}
308
/// A node in the borrow tree. Each node is uniquely identified by a tag via
/// the `nodes` map of `Tree`.
#[derive(Clone, Debug)]
pub(super) struct Node {
    /// The tag of this node.
    pub tag: BorTag,
    /// All tags except the root have a parent tag.
    pub parent: Option<UniIndex>,
    /// If the pointer was reborrowed, it has children.
    // FIXME: bench to compare this to FxHashSet and to other SmallVec sizes
    pub children: SmallVec<[UniIndex; 4]>,
    /// Either `Reserved`, `Frozen`, or `Disabled`, it is the permission this tag will
    /// lazily be initialized to on the first access.
    /// It is only ever `Disabled` for a tree root, since the root is initialized to `Unique` by
    /// its own separate mechanism.
    default_initial_perm: Permission,
    /// The default initial (strongest) idempotent foreign access.
    /// This participates in the invariant for `LocationState::idempotent_foreign_access`
    /// in cases where there is no location state yet. See `foreign_access_skipping.rs`,
    /// and `LocationState::idempotent_foreign_access` for more information.
    default_initial_idempotent_foreign_access: IdempotentForeignAccess,
    /// Whether a wildcard access could happen through this node.
    pub is_exposed: bool,
    /// Some extra information useful only for debugging purposes.
    pub debug_info: NodeDebugInfo,
}
335
336impl Tree {
337 /// Create a new tree, with only a root pointer.
338 pub fn new(root_tag: BorTag, size: Size, span: Span) -> Self {
339 // The root has `Disabled` as the default permission,
340 // so that any access out of bounds is invalid.
341 let root_default_perm = Permission::new_disabled();
342 let mut tag_mapping = UniKeyMap::default();
343 let root_idx = tag_mapping.insert(root_tag);
344 let nodes = {
345 let mut nodes = UniValMap::<Node>::default();
346 let mut debug_info = NodeDebugInfo::new(root_tag, root_default_perm, span);
347 // name the root so that all allocations contain one named pointer
348 debug_info.add_name("root of the allocation");
349 nodes.insert(
350 root_idx,
351 Node {
352 tag: root_tag,
353 parent: None,
354 children: SmallVec::default(),
355 default_initial_perm: root_default_perm,
356 // The root may never be skipped, all accesses will be local.
357 default_initial_idempotent_foreign_access: IdempotentForeignAccess::None,
358 is_exposed: false,
359 debug_info,
360 },
361 );
362 nodes
363 };
364 let locations = {
365 let mut perms = UniValMap::default();
366 // We manually set it to `Unique` on all in-bounds positions.
367 // We also ensure that it is accessed, so that no `Unique` but
368 // not yet accessed nodes exist. Essentially, we pretend there
369 // was a write that initialized these to `Unique`.
370 perms.insert(
371 root_idx,
372 LocationState::new_accessed(
373 Permission::new_unique(),
374 IdempotentForeignAccess::None,
375 ),
376 );
377 let exposed_cache = ExposedCache::default();
378 DedupRangeMap::new(size, LocationTree { perms, exposed_cache })
379 };
380 Self { roots: SmallVec::from_slice(&[root_idx]), nodes, locations, tag_mapping }
381 }
382}
383
impl<'tcx> Tree {
    /// Insert a new tag in the tree.
    ///
    /// `inside_perms` defines the initial permissions for a block of memory starting at
    /// `base_offset`. These may or may not be already marked as "accessed".
    /// `outside_perm` defines the initial permission for the rest of the allocation.
    /// These are definitely not "accessed".
    pub(super) fn new_child(
        &mut self,
        base_offset: Size,
        parent_prov: ProvenanceExtra,
        new_tag: BorTag,
        inside_perms: DedupRangeMap<LocationState>,
        outside_perm: Permission,
        protected: bool,
        span: Span,
    ) -> InterpResult<'tcx> {
        let idx = self.tag_mapping.insert(new_tag);
        // A wildcard parent means we cannot attach the child anywhere specific;
        // it will instead become a new wildcard root below.
        let parent_idx = match parent_prov {
            ProvenanceExtra::Concrete(parent_tag) =>
                Some(self.tag_mapping.get(&parent_tag).unwrap()),
            ProvenanceExtra::Wildcard => None,
        };
        assert!(outside_perm.is_initial());
        assert!(!outside_perm.is_unique());

        let default_strongest_idempotent =
            outside_perm.strongest_idempotent_foreign_access(protected);
        // Create the node
        self.nodes.insert(
            idx,
            Node {
                tag: new_tag,
                parent: parent_idx,
                children: SmallVec::default(),
                default_initial_perm: outside_perm,
                default_initial_idempotent_foreign_access: default_strongest_idempotent,
                is_exposed: false,
                debug_info: NodeDebugInfo::new(new_tag, outside_perm, span),
            },
        );
        if let Some(parent_idx) = parent_idx {
            let parent_node = self.nodes.get_mut(parent_idx).unwrap();
            // Register new_tag as a child of parent_tag
            parent_node.children.push(idx);
        } else {
            // If the parent had wildcard provenance, then register the idx
            // as a new wildcard root.
            // This preserves the orderedness of `roots` because a newly created
            // tag is greater than all previous tags.
            self.roots.push(idx);
        }

        // We need to know the weakest SIFA for `update_idempotent_foreign_access_after_retag`.
        let mut min_sifa = default_strongest_idempotent;
        for (Range { start, end }, &perm) in
            inside_perms.iter(Size::from_bytes(0), inside_perms.size())
        {
            assert!(perm.permission.is_initial());
            // The per-location SIFA must be exactly the one derived from its permission.
            assert_eq!(
                perm.idempotent_foreign_access,
                perm.permission.strongest_idempotent_foreign_access(protected)
            );

            min_sifa = cmp::min(min_sifa, perm.idempotent_foreign_access);
            // Write this initial state into every affected location of the allocation.
            for (_range, loc) in self
                .locations
                .iter_mut(Size::from_bytes(start) + base_offset, Size::from_bytes(end - start))
            {
                loc.perms.insert(idx, perm);
            }
        }

        // We don't have to update `exposed_cache` as the new node is not exposed and
        // has no children so the default counts of 0 are correct.

        // If the parent is a wildcard pointer, then it doesn't track SIFA and doesn't need to be updated.
        if let Some(parent_idx) = parent_idx {
            // Inserting the new perms might have broken the SIFA invariant (see
            // `foreign_access_skipping.rs`) if the SIFA we inserted is weaker than that of some parent.
            // We now weaken the recorded SIFA for our parents, until the invariant is restored. We
            // could weaken them all to `None`, but it is more efficient to compute the SIFA for the new
            // permission statically, and use that. For this we need the *minimum* SIFA (`None` needs
            // more fixup than `Write`).
            self.update_idempotent_foreign_access_after_retag(parent_idx, min_sifa);
        }

        interp_ok(())
    }

    /// Restores the SIFA "children are stronger"/"parents are weaker" invariant after a retag:
    /// reduce the SIFA of `current` and its parents to be no stronger than `strongest_allowed`.
    /// See `foreign_access_skipping.rs` and [`Tree::new_child`].
    fn update_idempotent_foreign_access_after_retag(
        &mut self,
        mut current: UniIndex,
        strongest_allowed: IdempotentForeignAccess,
    ) {
        if strongest_allowed == IdempotentForeignAccess::Write {
            // Nothing is stronger than `Write`.
            return;
        }
        // We walk the tree upwards, until the invariant is restored
        loop {
            let current_node = self.nodes.get_mut(current).unwrap();
            // Call `ensure_no_stronger_than` on all SIFAs for this node: the per-location SIFA, as well
            // as the default SIFA for not-yet-initialized locations.
            // Record whether we did any change; if not, the invariant is restored and we can stop the traversal.
            let mut any_change = false;
            for (_range, loc) in self.locations.iter_mut_all() {
                // Check if this node has a state for this location (or range of locations).
                if let Some(perm) = loc.perms.get_mut(current) {
                    // Update the per-location SIFA, recording if it changed.
                    any_change |=
                        perm.idempotent_foreign_access.ensure_no_stronger_than(strongest_allowed);
                }
            }
            // Now update `default_initial_idempotent_foreign_access`, which stores the default SIFA for not-yet-initialized locations.
            any_change |= current_node
                .default_initial_idempotent_foreign_access
                .ensure_no_stronger_than(strongest_allowed);

            if any_change {
                // Something changed at this level, so the parent might also be too strong.
                let Some(next) = self.nodes.get(current).unwrap().parent else {
                    // We have arrived at the root.
                    break;
                };
                current = next;
                continue;
            } else {
                break;
            }
        }
    }

    /// Deallocation requires
    /// - a pointer that permits write accesses
    /// - the absence of Strong Protectors anywhere in the allocation
    pub fn dealloc(
        &mut self,
        prov: ProvenanceExtra,
        access_range: AllocRange,
        global: &GlobalState,
        alloc_id: AllocId, // diagnostics
        span: Span,        // diagnostics
    ) -> InterpResult<'tcx> {
        // A deallocation counts as a write access; this raises UB for tags
        // that may not write, and handles weak protectors.
        self.perform_access(
            prov,
            access_range,
            AccessKind::Write,
            AccessCause::Dealloc,
            global,
            alloc_id,
            span,
        )?;

        let start_idx = match prov {
            ProvenanceExtra::Concrete(tag) => Some(self.tag_mapping.get(&tag).unwrap()),
            ProvenanceExtra::Wildcard => None,
        };

        // Check if this breaks any strong protector.
        // (Weak protectors are already handled by `perform_access`.)
        for (loc_range, loc) in self.locations.iter_mut(access_range.start, access_range.size) {
            let diagnostics = DiagnosticInfo {
                alloc_id,
                span,
                transition_range: loc_range,
                access_range: Some(access_range),
                access_cause: AccessCause::Dealloc,
            };
            // Checks the tree containing `idx` for strong protector violations.
            // It does this in traversal order.
            let mut check_tree = |idx| {
                TreeVisitor { nodes: &mut self.nodes, data: loc }
                    .traverse_this_parents_children_other(
                        idx,
                        // Visit all children, skipping none.
                        |_| ContinueTraversal::Recurse,
                        |args: NodeAppArgs<'_, _>| {
                            let node = args.nodes.get(args.idx).unwrap();

                            // Fall back to the lazy default state if this tag was
                            // never initialized at this location.
                            let perm = args
                                .data
                                .perms
                                .get(args.idx)
                                .copied()
                                .unwrap_or_else(|| node.default_location_state());
                            if global.borrow().protected_tags.get(&node.tag)
                                == Some(&ProtectorKind::StrongProtector)
                                // Don't check for protector if it is a Cell (see `unsafe_cell_deallocate` in `interior_mutability.rs`).
                                // Related to https://github.com/rust-lang/rust/issues/55005.
                                && !perm.permission.is_cell()
                                // Only trigger UB if the accessed bit is set, i.e. if the protector is actually protecting this offset. See #4579.
                                && perm.accessed
                            {
                                Err(TbError {
                                    error_kind: TransitionError::ProtectedDealloc,
                                    access_info: &diagnostics,
                                    conflicting_node_info: &node.debug_info,
                                    accessed_node_info: start_idx
                                        .map(|idx| &args.nodes.get(idx).unwrap().debug_info),
                                }
                                .build())
                            } else {
                                Ok(())
                            }
                        },
                    )
            };
            // If we have a start index we first check its subtree in traversal order.
            // This results in us showing the error of the closest node instead of an
            // arbitrary one.
            let accessed_root = start_idx.map(&mut check_tree).transpose()?;
            // Afterwards we check all other trees.
            // We iterate over the list in reverse order to ensure that we do not visit
            // a parent before its child.
            for &root in self.roots.iter().rev() {
                if Some(root) == accessed_root {
                    continue;
                }
                check_tree(root)?;
            }
        }
        interp_ok(())
    }

    /// Map the per-node and per-location `LocationState::perform_access`
    /// to each location of the first component of `access_range_and_kind`,
    /// on every tag of the allocation.
    ///
    /// `LocationState::perform_access` will take care of raising transition
    /// errors and updating the `accessed` status of each location,
    /// this traversal adds to that:
    /// - inserting into the map locations that do not exist yet,
    /// - trimming the traversal,
    /// - recording the history.
    pub fn perform_access(
        &mut self,
        prov: ProvenanceExtra,
        access_range: AllocRange,
        access_kind: AccessKind,
        access_cause: AccessCause, // diagnostics
        global: &GlobalState,
        alloc_id: AllocId, // diagnostics
        span: Span,        // diagnostics
    ) -> InterpResult<'tcx> {
        // Extra (slow) sanity checks, only when wildcards are involved.
        #[cfg(feature = "expensive-consistency-checks")]
        if self.roots.len() > 1 || matches!(prov, ProvenanceExtra::Wildcard) {
            self.verify_wildcard_consistency(global);
        }

        // A wildcard access has no known source node.
        let source_idx = match prov {
            ProvenanceExtra::Concrete(tag) => Some(self.tag_mapping.get(&tag).unwrap()),
            ProvenanceExtra::Wildcard => None,
        };
        // We iterate over affected locations and traverse the tree for each of them.
        for (loc_range, loc) in self.locations.iter_mut(access_range.start, access_range.size) {
            let diagnostics = DiagnosticInfo {
                access_cause,
                access_range: Some(access_range),
                alloc_id,
                span,
                transition_range: loc_range,
            };
            loc.perform_access(
                self.roots.iter().copied(),
                &mut self.nodes,
                source_idx,
                access_kind,
                global,
                ChildrenVisitMode::VisitChildrenOfAccessed,
                &diagnostics,
                /* min_exposed_child */ None, // only matters for protector end accesses
            )?;
        }
        interp_ok(())
    }

    /// This is the special access that is applied on protector release:
    /// - the access will be applied only to accessed locations of the allocation,
    /// - it will not be visible to children,
    /// - it will be recorded as a `FnExit` diagnostic access
    /// - and it will be a read except if the location is `Unique`, i.e. has been written to,
    ///   in which case it will be a write.
    /// - otherwise identical to `Tree::perform_access`
    pub fn perform_protector_end_access(
        &mut self,
        tag: BorTag,
        global: &GlobalState,
        alloc_id: AllocId, // diagnostics
        span: Span,        // diagnostics
    ) -> InterpResult<'tcx> {
        // Extra (slow) sanity checks, only when wildcards are involved.
        #[cfg(feature = "expensive-consistency-checks")]
        if self.roots.len() > 1 {
            self.verify_wildcard_consistency(global);
        }

        let source_idx = self.tag_mapping.get(&tag).unwrap();

        let min_exposed_child = if self.roots.len() > 1 {
            LocationTree::get_min_exposed_child(source_idx, &self.nodes)
        } else {
            // There's no point in computing this when there is just one tree.
            None
        };

        // This is a special access through the entire allocation.
        // It actually only affects `accessed` locations, so we need
        // to filter on those before initiating the traversal.
        //
        // In addition this implicit access should not be visible to children,
        // thus the use of `traverse_nonchildren`.
        // See the test case `returned_mut_is_usable` from
        // `tests/pass/tree_borrows/tree-borrows.rs` for an example of
        // why this is important.
        for (loc_range, loc) in self.locations.iter_mut_all() {
            // Only visit accessed permissions
            if let Some(p) = loc.perms.get(source_idx)
                && let Some(access_kind) = p.permission.associated_access()
                && p.accessed
            {
                let diagnostics = DiagnosticInfo {
                    access_cause: AccessCause::FnExit(access_kind),
                    access_range: None,
                    alloc_id,
                    span,
                    transition_range: loc_range,
                };
                loc.perform_access(
                    self.roots.iter().copied(),
                    &mut self.nodes,
                    Some(source_idx),
                    access_kind,
                    global,
                    ChildrenVisitMode::SkipChildrenOfAccessed,
                    &diagnostics,
                    min_exposed_child,
                )?;
            }
        }
        interp_ok(())
    }
}
727
/// Integration with the BorTag garbage collector
impl Tree {
    /// Runs the tag GC over this allocation: for every tree root, prunes all
    /// useless descendants (tags not in `live_tags` that have no remaining
    /// children), then compacts the per-location maps by merging adjacent
    /// ranges that the removal made equal.
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for i in 0..(self.roots.len()) {
            self.remove_useless_children(self.roots[i], live_tags);
        }
        // Right after the GC runs is a good moment to check if we can
        // merge some adjacent ranges that were made equal by the removal of some
        // tags (this does not necessarily mean that they have identical internal representations,
        // see the `PartialEq` impl for `UniValMap`)
        self.locations.merge_adjacent_thorough();
    }

    /// Checks if a node is useless and should be GC'ed.
    /// A node is useless if it has no children and also the tag is no longer live.
    fn is_useless(&self, idx: UniIndex, live: &FxHashSet<BorTag>) -> bool {
        let node = self.nodes.get(idx).unwrap();
        node.children.is_empty() && !live.contains(&node.tag)
    }

    /// Checks whether a node can be replaced by its only child.
    /// If so, returns the index of said only child.
    /// If not, returns `None`.
    fn can_be_replaced_by_single_child(
        &self,
        idx: UniIndex,
        live: &FxHashSet<BorTag>,
    ) -> Option<UniIndex> {
        let node = self.nodes.get(idx).unwrap();

        // Only nodes with exactly one child are candidates for replacement.
        let [child_idx] = node.children[..] else { return None };

        // We never want to replace the root node, as it is also kept in `root_ptr_tags`.
        if live.contains(&node.tag) || node.parent.is_none() {
            return None;
        }
        // Since protected nodes are never GC'd (see `borrow_tracker::FrameExtra::visit_provenance`),
        // we know that `node` is not protected because otherwise `live` would
        // have contained `node.tag`.
        let child = self.nodes.get(child_idx).unwrap();
        // Check that for that one child, `can_be_replaced_by_child` holds for the permission
        // on all locations.
        for (_range, loc) in self.locations.iter_all() {
            // A location with no explicit entry is still at the node's
            // default (non-accessed) permission.
            let parent_perm = loc
                .perms
                .get(idx)
                .map(|x| x.permission)
                .unwrap_or_else(|| node.default_initial_perm);
            let child_perm = loc
                .perms
                .get(child_idx)
                .map(|x| x.permission)
                .unwrap_or_else(|| child.default_initial_perm);
            if !parent_perm.can_be_replaced_by_child(child_perm) {
                return None;
            }
        }

        Some(child_idx)
    }

    /// Properly removes a node.
    /// The node to be removed should not otherwise be usable. It also
    /// should have no children, but this is not checked, so that nodes
    /// whose children were rotated somewhere else can be deleted without
    /// having to first modify them to clear that array.
    fn remove_useless_node(&mut self, this: UniIndex) {
        // Due to the API of UniMap we must make sure to call
        // `UniValMap::remove` for the key of this node on *all* maps that used it
        // (which are `self.nodes` and, for every location, its `perms` and
        // `exposed_cache`) before we can safely apply `UniKeyMap::remove` to
        // truly remove this tag from the `tag_mapping`.
        let node = self.nodes.remove(this).unwrap();
        for (_range, loc) in self.locations.iter_mut_all() {
            loc.perms.remove(this);
            loc.exposed_cache.remove(this);
        }
        self.tag_mapping.remove(&node.tag);
    }

    /// Traverses the entire tree looking for useless tags.
    /// Removes from the tree all useless child nodes of root.
    /// It will not delete the root itself.
    ///
    /// NOTE: This leaves in the middle of the tree tags that are unreachable but have
    /// reachable children. There is a potential for compacting the tree by reassigning
    /// children of dead tags to the nearest live parent, but it must be done with care
    /// not to remove UB.
    ///
    /// Example: Consider the tree `root - parent - child`, with `parent: Frozen` and
    /// `child: Reserved`. This tree can exist. If we blindly delete `parent` and reassign
    /// `child` to be a direct child of `root` then Writes to `child` are now permitted
    /// whereas they were not when `parent` was still there.
    fn remove_useless_children(&mut self, root: UniIndex, live: &FxHashSet<BorTag>) {
        // To avoid stack overflows, we roll our own stack.
        // Each element in the stack consists of the current tag, and the number of the
        // next child to be processed.

        // The other functions are written using the `TreeVisitorStack`, but that does not work here
        // since we need to 1) do a post-traversal and 2) remove nodes from the tree.
        // Since we do a post-traversal (by deleting nodes only after handling all children),
        // we also need to be a bit smarter than "pop node, push all children."
        let mut stack = vec![(root, 0)];
        while let Some((tag, nth_child)) = stack.last_mut() {
            let node = self.nodes.get(*tag).unwrap();
            if *nth_child < node.children.len() {
                // Visit the child by pushing it to the stack.
                // Also increase `nth_child` so that when we come back to the `tag` node, we
                // look at the next child.
                let next_child = node.children[*nth_child];
                *nth_child += 1;
                stack.push((next_child, 0));
                continue;
            } else {
                // We have processed all children of `node`, so now it is time to process `node` itself.
                // First, get the current children of `node`. To appease the borrow checker,
                // we have to temporarily move the list out of the node, and then put the
                // list of remaining children back in.
                let mut children_of_node =
                    mem::take(&mut self.nodes.get_mut(*tag).unwrap().children);
                // Remove all useless children.
                children_of_node.retain_mut(|idx| {
                    if self.is_useless(*idx, live) {
                        // Delete `idx` node everywhere else.
                        self.remove_useless_node(*idx);
                        // And delete it from children_of_node.
                        false
                    } else {
                        if let Some(nextchild) = self.can_be_replaced_by_single_child(*idx, live) {
                            // `nextchild` is our grandchild, and will become our direct child.
                            // Delete the in-between node, `idx`.
                            self.remove_useless_node(*idx);
                            // Set the new child's parent.
                            self.nodes.get_mut(nextchild).unwrap().parent = Some(*tag);
                            // Save the new child in children_of_node.
                            *idx = nextchild;
                        }
                        // retain it
                        true
                    }
                });
                // Put back the now-filtered vector.
                self.nodes.get_mut(*tag).unwrap().children = children_of_node;

                // We are done, the parent can continue.
                stack.pop();
                continue;
            }
        }
    }
}
879
impl<'tcx> LocationTree {
    /// Returns the smallest exposed tag, if any, in the subtree rooted at `root`
    /// (the traversal starts at `root`, so `root` itself is included).
    fn get_min_exposed_child(root: UniIndex, nodes: &UniValMap<Node>) -> Option<BorTag> {
        // We cannot use the wildcard datastructure to improve this lookup. This is because
        // the datastructure only tracks enabled nodes and we need to also consider disabled ones.
        let mut stack = vec![root];
        let mut min_tag = None;
        while let Some(idx) = stack.pop() {
            let node = nodes.get(idx).unwrap();
            if min_tag.is_some_and(|min| min < node.tag) {
                // The minimum we found before is smaller than this tag. Since a child's
                // tag is always bigger than its parent's, every tag in this subtree is at
                // least `node.tag`, so nothing in here can improve the minimum: skip it.
                continue;
            }
            stack.extend_from_slice(node.children.as_slice());
            if node.is_exposed {
                min_tag = match min_tag {
                    Some(prev) if prev < node.tag => Some(prev),
                    _ => Some(node.tag),
                };
            }
        }
        min_tag
    }

    /// Performs an access on this location.
    /// * `access_source`: The index, if any, where the access came from.
    /// * `visit_children`: Whether to skip updating the children of `access_source`.
    /// * `min_exposed_child`: The tag of the smallest exposed (transitive) child of the accessed node.
    ///   This is only used with `visit_children == SkipChildrenOfAccessed`, where we need to skip children
    ///   of the accessed node.
    fn perform_access(
        &mut self,
        roots: impl Iterator<Item = UniIndex>,
        nodes: &mut UniValMap<Node>,
        access_source: Option<UniIndex>,
        access_kind: AccessKind,
        global: &GlobalState,
        visit_children: ChildrenVisitMode,
        diagnostics: &DiagnosticInfo,
        min_exposed_child: Option<BorTag>,
    ) -> InterpResult<'tcx> {
        // If we know which node is being accessed, do a normal (non-wildcard)
        // access on the tree containing it first; remember that tree's root.
        let accessed_root = if let Some(idx) = access_source {
            Some(self.perform_normal_access(
                idx,
                nodes,
                access_kind,
                global,
                visit_children,
                diagnostics,
            )?)
        } else {
            // `SkipChildrenOfAccessed` only gets set on protector release, which only
            // occurs on a known node.
            assert!(matches!(visit_children, ChildrenVisitMode::VisitChildrenOfAccessed));
            None
        };

        // Tag of the normally-accessed tree's root; used as `max_local_tag` below.
        let accessed_root_tag = accessed_root.map(|idx| nodes.get(idx).unwrap().tag);
        for (i, root) in roots.enumerate() {
            let tag = nodes.get(root).unwrap().tag;
            // On a protector release access we have to skip the children of the accessed tag.
            // However, if the tag has exposed children then some of the wildcard subtrees could
            // also be children of the accessed node and would also need to be skipped. We can
            // narrow down which wildcard trees might be children by comparing their root tag to the
            // minimum exposed child of the accessed node. As the parent tag is always smaller
            // than the child tag this means we only need to skip subtrees with a root tag larger
            // than `min_exposed_child`. Once we find such a root, we can leave the loop because roots
            // are sorted by tag.
            if matches!(visit_children, ChildrenVisitMode::SkipChildrenOfAccessed)
                && let Some(min_exposed_child) = min_exposed_child
                && tag > min_exposed_child
            {
                break;
            }
            // We don't perform a wildcard access on the tree we already performed a
            // normal access on.
            if Some(root) == accessed_root {
                continue;
            }
            // The choice of `max_local_tag` requires some thought.
            // This can only be a local access for nodes that are a parent of the accessed node
            // and are therefore smaller, so the accessed node itself is a valid choice for `max_local_tag`.
            // However, using `accessed_root` is better since that will be smaller. It is still a valid choice
            // because for nodes *in other trees*, if they are a parent of the accessed node then they
            // are a parent of `accessed_root`.
            //
            // As a consequence of this, since the root of the main tree is the smallest tag in the entire
            // allocation, if the access occurred in the main tree then other subtrees will only see foreign accesses.
            self.perform_wildcard_access(
                root,
                access_source,
                /*max_local_tag*/ accessed_root_tag,
                nodes,
                access_kind,
                global,
                diagnostics,
                /*is_wildcard_tree*/ i != 0,
            )?;
        }
        interp_ok(())
    }

    /// Performs a normal access on the tree containing `access_source`.
    ///
    /// Returns the root index of this tree.
    /// * `access_source`: The index of the tag being accessed.
    /// * `visit_children`: Whether to skip the children of `access_source`
    ///   during the access. Used for protector end access.
    fn perform_normal_access(
        &mut self,
        access_source: UniIndex,
        nodes: &mut UniValMap<Node>,
        access_kind: AccessKind,
        global: &GlobalState,
        visit_children: ChildrenVisitMode,
        diagnostics: &DiagnosticInfo,
    ) -> InterpResult<'tcx, UniIndex> {
        // Performs the per-node work:
        // - insert the permission if it does not exist
        // - perform the access
        // - record the transition
        // to which some optimizations are added:
        // - skip the traversal of the children in some cases
        // - do not record noop transitions
        //
        // `loc_range` is only for diagnostics (it is the range of
        // the `RangeMap` on which we are currently working).
        let node_skipper = |args: &NodeAppArgs<'_, LocationTree>| -> ContinueTraversal {
            let node = args.nodes.get(args.idx).unwrap();
            let perm = args.data.perms.get(args.idx);

            // Non-accessed locations are treated as being in the node's default state.
            let old_state = perm.copied().unwrap_or_else(|| node.default_location_state());
            old_state.skip_if_known_noop(access_kind, args.rel_pos)
        };
        let node_app = |args: NodeAppArgs<'_, LocationTree>| {
            let node = args.nodes.get_mut(args.idx).unwrap();
            let mut perm = args.data.perms.entry(args.idx);

            let state = perm.or_insert(node.default_location_state());

            let protected = global.borrow().protected_tags.contains_key(&node.tag);
            state
                .perform_transition(
                    args.idx,
                    args.nodes,
                    &mut args.data.exposed_cache,
                    access_kind,
                    args.rel_pos,
                    protected,
                    diagnostics,
                )
                .map_err(|error_kind| {
                    TbError {
                        error_kind,
                        access_info: diagnostics,
                        conflicting_node_info: &args.nodes.get(args.idx).unwrap().debug_info,
                        accessed_node_info: Some(
                            &args.nodes.get(access_source).unwrap().debug_info,
                        ),
                    }
                    .build()
                })
        };

        let visitor = TreeVisitor { nodes, data: self };
        match visit_children {
            ChildrenVisitMode::VisitChildrenOfAccessed =>
                visitor.traverse_this_parents_children_other(access_source, node_skipper, node_app),
            ChildrenVisitMode::SkipChildrenOfAccessed =>
                visitor.traverse_nonchildren(access_source, node_skipper, node_app),
        }
        .into()
    }

    /// Performs a wildcard access on the tree with root `root`. Takes the `access_relatedness`
    /// for each node from the `WildcardState` datastructure.
    /// * `root`: Root of the tree being accessed.
    /// * `access_source`: the index of the accessed tag, if any.
    ///   This is only used for printing the correct tag on errors.
    /// * `max_local_tag`: The access can only be local for nodes whose tag is
    ///   at most `max_local_tag`.
    fn perform_wildcard_access(
        &mut self,
        root: UniIndex,
        access_source: Option<UniIndex>,
        max_local_tag: Option<BorTag>,
        nodes: &mut UniValMap<Node>,
        access_kind: AccessKind,
        global: &GlobalState,
        diagnostics: &DiagnosticInfo,
        is_wildcard_tree: bool,
    ) -> InterpResult<'tcx> {
        let get_relatedness = |idx: UniIndex, node: &Node, loc: &LocationTree| {
            // If the tag is larger than `max_local_tag` then the access can only be foreign.
            let only_foreign = max_local_tag.is_some_and(|max_local_tag| max_local_tag < node.tag);
            loc.exposed_cache.access_relatedness(
                root,
                idx,
                access_kind,
                is_wildcard_tree,
                only_foreign,
            )
        };

        // Whether there is an exposed node in this tree that allows this access.
        let mut has_valid_exposed = false;

        // This does a traversal across the tree updating children before their parents. The
        // difference to `perform_normal_access` is that we take the access relatedness from
        // the wildcard tracking state of the node instead of from the visitor itself.
        //
        // Unlike for a normal access, the iteration order is important for improving the
        // accuracy of wildcard accesses if `max_local_tag` is `Some`: processing the effects of this
        // access further down the tree can cause exposed nodes to lose permissions, thus updating
        // the wildcard data structure, which will be taken into account when processing the parent
        // nodes. Also see the test `cross_tree_update_older_invalid_exposed2.rs`
        // (Doing accesses in the opposite order cannot help with precision but the reasons are complicated;
        // see <https://github.com/rust-lang/miri/pull/4707#discussion_r2581661123>.)
        //
        // Note, however, that this is an approximation: there can be situations where a node is
        // marked as having an exposed foreign node, but actually that foreign node cannot be
        // the source of the access due to `max_local_tag`. The wildcard tracking cannot know
        // about `max_local_tag` so we will incorrectly assume that this might be a foreign access.
        TreeVisitor { data: self, nodes }.traverse_children_this(
            root,
            |args| -> ContinueTraversal {
                let node = args.nodes.get(args.idx).unwrap();
                let perm = args.data.perms.get(args.idx);

                let old_state = perm.copied().unwrap_or_else(|| node.default_location_state());
                // If we know where, relative to this node, the wildcard access occurs,
                // then check if we can skip the entire subtree.
                if let Some(relatedness) = get_relatedness(args.idx, node, args.data)
                    && let Some(relatedness) = relatedness.to_relatedness()
                {
                    // We can use the usual SIFA machinery to skip nodes.
                    old_state.skip_if_known_noop(access_kind, relatedness)
                } else {
                    ContinueTraversal::Recurse
                }
            },
            |args| {
                let node = args.nodes.get_mut(args.idx).unwrap();

                let protected = global.borrow().protected_tags.contains_key(&node.tag);

                let Some(wildcard_relatedness) = get_relatedness(args.idx, node, args.data) else {
                    // There doesn't exist a valid exposed reference for this access to
                    // happen through.
                    // This can only happen if `root` is the main root: We set
                    // `max_foreign_access==Write` on all wildcard roots, so at least a foreign access
                    // is always possible on all nodes in a wildcard subtree.
                    return Err(no_valid_exposed_references_error(diagnostics));
                };

                let mut entry = args.data.perms.entry(args.idx);
                let perm = entry.or_insert(node.default_location_state());

                // We only count exposed nodes through which an access could happen.
                if node.is_exposed
                    && perm.permission.strongest_allowed_local_access(protected).allows(access_kind)
                    && max_local_tag.is_none_or(|max_local_tag| max_local_tag >= node.tag)
                {
                    has_valid_exposed = true;
                }

                let Some(relatedness) = wildcard_relatedness.to_relatedness() else {
                    // If the access type is Either, then we do not apply any transition
                    // to this node, but we still update each of its children.
                    // This is an imprecision! In the future, maybe we can still do some sort
                    // of best-effort update here.
                    return Ok(());
                };

                // We know the exact relatedness, so we can actually do precise checks.
                perm.perform_transition(
                    args.idx,
                    args.nodes,
                    &mut args.data.exposed_cache,
                    access_kind,
                    relatedness,
                    protected,
                    diagnostics,
                )
                .map_err(|trans| {
                    let node = args.nodes.get(args.idx).unwrap();
                    TbError {
                        error_kind: trans,
                        access_info: diagnostics,
                        conflicting_node_info: &node.debug_info,
                        accessed_node_info: access_source
                            .map(|idx| &args.nodes.get(idx).unwrap().debug_info),
                    }
                    .build()
                })
            },
        )?;
        // If there is no exposed node in this tree that allows this access, then the access *must*
        // be foreign to the entire subtree. Foreign accesses are only possible on wildcard subtrees
        // as there are no ancestors to the main root. So if we do not find a valid exposed node in
        // the main tree then this access is UB.
        if !has_valid_exposed && !is_wildcard_tree {
            return Err(no_valid_exposed_references_error(diagnostics)).into();
        }
        interp_ok(())
    }
}
1188
1189impl Node {
1190 pub fn default_location_state(&self) -> LocationState {
1191 LocationState::new_non_accessed(
1192 self.default_initial_perm,
1193 self.default_initial_idempotent_foreign_access,
1194 )
1195 }
1196}
1197
1198impl VisitProvenance for Tree {
1199 fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
1200 // To ensure that the roots never get removed, we visit them.
1201 // FIXME: it should be possible to GC wildcard tree roots.
1202 for id in self.roots.iter().copied() {
1203 visit(None, Some(self.nodes.get(id).unwrap().tag));
1204 }
1205 // We also need to keep around any exposed tags through which
1206 // an access could still happen.
1207 for (_id, node) in self.nodes.iter() {
1208 if node.is_exposed {
1209 visit(None, Some(node.tag))
1210 }
1211 }
1212 }
1213}
1214
/// Relative position of the access with respect to a given node:
/// did it come from below (the node or its subtree) or from elsewhere?
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AccessRelatedness {
    /// The access happened either through the node itself or one of
    /// its transitive children.
    LocalAccess,
    /// The access happened through this node's ancestor or through
    /// a sibling/cousin/uncle/etc.
    ForeignAccess,
}
1225
1226impl AccessRelatedness {
1227 /// Check that access is either Ancestor or Distant, i.e. not
1228 /// a transitive child (initial pointer included).
1229 pub fn is_foreign(self) -> bool {
1230 matches!(self, AccessRelatedness::ForeignAccess)
1231 }
1232}