miri/borrow_tracker/stacked_borrows/mod.rs
//! Implements "Stacked Borrows". See <https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md>
//! for further information.

pub mod diagnostics;
mod item;
mod stack;

use std::cmp;
use std::fmt::Write;
use std::sync::atomic::AtomicBool;

use rustc_abi::Size;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::Mutability;
use rustc_middle::ty::layout::HasTypingEnv;
use rustc_middle::ty::{self, Ty};

use self::diagnostics::{RetagCause, RetagInfo};
pub use self::item::{Item, Permission};
pub use self::stack::Stack;
use crate::borrow_tracker::stacked_borrows::diagnostics::{
    AllocHistory, DiagnosticCx, DiagnosticCxBuilder,
};
use crate::borrow_tracker::{AccessKind, GlobalStateInner, ProtectorKind};
use crate::concurrency::data_race::{NaReadType, NaWriteType};
use crate::*;

pub type AllocState = Stacks;

/// Extra per-allocation state.
#[derive(Clone, Debug)]
pub struct Stacks {
    // Even reading memory can have effects on the stack, so we need a `RefCell` here.
    stacks: DedupRangeMap<Stack>,
    /// Stores past operations on this allocation
    history: AllocHistory,
    /// The set of tags that have been exposed inside this allocation.
    exposed_tags: FxHashSet<BorTag>,
}

/// Indicates which permissions to grant to the retagged pointer.
#[derive(Clone, Debug)]
enum NewPermission {
    Uniform {
        perm: Permission,
        access: Option<AccessKind>,
        protector: Option<ProtectorKind>,
    },
    FreezeSensitive {
        freeze_perm: Permission,
        freeze_access: Option<AccessKind>,
        freeze_protector: Option<ProtectorKind>,
        nonfreeze_perm: Permission,
        nonfreeze_access: Option<AccessKind>,
        // nonfreeze_protector must always be None
    },
}

impl NewPermission {
    /// A key function: determine the permissions to grant at a retag for the given kind of
    /// reference/pointer.
    fn from_ref_ty<'tcx>(ty: Ty<'tcx>, mode: RetagMode, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        let protector = (mode == RetagMode::FnEntry).then_some(ProtectorKind::StrongProtector);
        match ty.kind() {
            ty::Ref(_, pointee, Mutability::Mut) => {
                if mode == RetagMode::TwoPhase {
                    // We mostly just give up on 2phase-borrows, and treat these exactly like raw pointers.
                    assert!(protector.is_none()); // RetagMode can't be both FnEntry and TwoPhase.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                } else if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
                    // A regular full mutable reference. On `FnEntry` this is `noalias` and `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::Unique,
                        access: Some(AccessKind::Write),
                        protector,
                    }
                } else {
                    // `!Unpin` dereferences do not get `noalias` nor `dereferenceable`.
                    NewPermission::Uniform {
                        perm: Permission::SharedReadWrite,
                        access: None,
                        protector: None,
                    }
                }
            }
            ty::RawPtr(_, Mutability::Mut) => {
                assert!(mode == RetagMode::Raw);
                assert!(protector.is_none()); // RetagMode can't be both FnEntry and Raw.
                // Mutable raw pointer. No access, not protected.
                NewPermission::Uniform {
                    perm: Permission::SharedReadWrite,
                    access: None,
                    protector: None,
                }
            }
            ty::Ref(_, _pointee, Mutability::Not) => {
                // Shared references. If frozen, these get `noalias` and `dereferenceable`; otherwise neither.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: protector,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    // Inside UnsafeCell, this does *not* count as an access, as there
                    // might actually be mutable references further up the stack that
                    // we have to keep alive.
                    nonfreeze_access: None,
                    // We do not protect inside UnsafeCell.
                    // This fixes https://github.com/rust-lang/rust/issues/55005.
                }
            }
            ty::RawPtr(_, Mutability::Not) => {
                assert!(mode == RetagMode::Raw);
                assert!(protector.is_none()); // RetagMode can't be both FnEntry and Raw.
                // `*const T`, when freshly created, are read-only in the frozen part.
                NewPermission::FreezeSensitive {
                    freeze_perm: Permission::SharedReadOnly,
                    freeze_access: Some(AccessKind::Read),
                    freeze_protector: None,
                    nonfreeze_perm: Permission::SharedReadWrite,
                    nonfreeze_access: None,
                }
            }
            _ => unreachable!(),
        }
    }

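    // Illustrative sketch (editorial, not from the interpreter itself): how the cases
    // above play out for a single location, assuming default retagging. `base`, `r`,
    // `p`, `s` name the tags involved.
    //
    //     let mut x = 0u8;      // base item:                  [Unique(base)]
    //     let r = &mut x;       // `Unique` + write access:    [Unique(base), Unique(r)]
    //     let p = &raw mut *r;  // `SharedReadWrite`, no access (raw retag)
    //     let s = &*r;          // frozen: `SharedReadOnly` + read access on top
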
    fn from_box_ty<'tcx>(ty: Ty<'tcx>, mode: RetagMode, cx: &crate::MiriInterpCx<'tcx>) -> Self {
        // `ty` is not the `Box` but the field of the Box with this pointer (due to allocator handling).
        let pointee = ty.builtin_deref(true).unwrap();
        if pointee.is_unpin(*cx.tcx, cx.typing_env()) {
            // A regular box. On `FnEntry` this is `noalias`, but not `dereferenceable` (hence only
            // a weak protector).
            NewPermission::Uniform {
                perm: Permission::Unique,
                access: Some(AccessKind::Write),
                protector: (mode == RetagMode::FnEntry).then_some(ProtectorKind::WeakProtector),
            }
        } else {
            // `!Unpin` boxes do not get `noalias` nor `dereferenceable`.
            NewPermission::Uniform {
                perm: Permission::SharedReadWrite,
                access: None,
                protector: None,
            }
        }
    }

    fn protector(&self) -> Option<ProtectorKind> {
        match self {
            NewPermission::Uniform { protector, .. } => *protector,
            NewPermission::FreezeSensitive { freeze_protector, .. } => *freeze_protector,
        }
    }
}

// # Stacked Borrows Core Begin

/// We need to make at least the following things true:
///
/// U1: After creating a `Uniq`, it is at the top.
/// U2: If the top is `Uniq`, accesses must be through that `Uniq` or remove it.
/// U3: If an access happens with a `Uniq`, it requires the `Uniq` to be in the stack.
///
/// F1: After creating a `&`, the parts outside `UnsafeCell` have our `SharedReadOnly` on top.
/// F2: If a write access happens, it pops the `SharedReadOnly`. This has two pieces:
///     F2a: If a write happens granted by an item below our `SharedReadOnly`, the `SharedReadOnly`
///          gets popped.
///     F2b: No `SharedReadWrite` or `Unique` will ever be added on top of our `SharedReadOnly`.
/// F3: If an access happens with an `&` outside `UnsafeCell`,
///     it requires the `SharedReadOnly` to still be in the stack.
///
/// Core relation on `Permission` to define which accesses are allowed
impl Permission {
    /// This defines for a given permission, whether it permits the given kind of access.
    fn grants(self, access: AccessKind) -> bool {
        // Disabled grants nothing. Otherwise, all items grant read access, and except for SharedReadOnly they grant write access.
        self != Permission::Disabled
            && (access == AccessKind::Read || self != Permission::SharedReadOnly)
    }
}

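// For quick reference, `grants` amounts to this table (derived from the code above):
//
//                       | read | write
//     ------------------+------+-------
//     Unique            | yes  | yes
//     SharedReadWrite   | yes  | yes
//     SharedReadOnly    | yes  | no
//     Disabled          | no   | no
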
/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
#[derive(Copy, Clone, Debug)]
enum ItemInvalidationCause {
    Conflict,
    Dealloc,
}

/// Core per-location operations: access, dealloc, reborrow.
impl<'tcx> Stack {
    /// Find the first write-incompatible item above the given one --
    /// i.e., find the height to which the stack will be truncated when writing to `granting`.
    fn find_first_write_incompatible(&self, granting: usize) -> usize {
        let perm = self.get(granting).unwrap().perm();
        match perm {
            Permission::SharedReadOnly => bug!("Cannot use SharedReadOnly for writing"),
            Permission::Disabled => bug!("Cannot use Disabled for anything"),
            Permission::Unique => {
                // On a write, everything above us is incompatible.
                granting + 1
            }
            Permission::SharedReadWrite => {
                // The SharedReadWrite *just* above us are compatible, so skip those.
                let mut idx = granting + 1;
                while let Some(item) = self.get(idx) {
                    if item.perm() == Permission::SharedReadWrite {
                        // Go on.
                        idx += 1;
                    } else {
                        // Found first incompatible!
                        break;
                    }
                }
                idx
            }
        }
    }

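    // Worked example (editorial sketch): for a stack
    // `[Unique(0), SharedReadWrite(1), SharedReadWrite(2), SharedReadOnly(3)]`:
    // - `find_first_write_incompatible(0)` returns 1: writing through the `Unique`
    //   truncates everything above it.
    // - `find_first_write_incompatible(1)` returns 3: the `SharedReadWrite` at index 2
    //   belongs to the same group, but the `SharedReadOnly` above it does not.
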
    /// The given item was invalidated -- check its protectors for whether that will cause UB.
    fn item_invalidated(
        item: &Item,
        global: &GlobalStateInner,
        dcx: &DiagnosticCx<'_, '_, 'tcx>,
        cause: ItemInvalidationCause,
    ) -> InterpResult<'tcx> {
        if !global.tracked_pointer_tags.is_empty() {
            dcx.check_tracked_tag_popped(item, global);
        }

        if !item.protected() {
            return interp_ok(());
        }

        // We store tags twice, once in global.protected_tags and once in each call frame.
        // We do this because consulting a single global set in this function is faster
        // than attempting to search all call frames in the program for the `FrameExtra`
        // (if any) which is protecting the popped tag.
        //
        // This duplication trades off making `end_call` slower to make this function faster. This
        // trade-off is profitable in practice for a combination of two reasons.
        // 1. A single protected tag can (and does in some programs) protect thousands of `Item`s.
        //    Therefore, adding overhead in function call/return is profitable even if it only
        //    saves a little work in this function.
        // 2. Most frames protect only one or two tags. So this duplicative global turns a search
        //    which ends up about linear in the number of protected tags in the program into a
        //    constant time check (and a slow linear, because the tags in the frames aren't contiguous).
        if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
            // The only way this is okay is if the protector is weak and we are deallocating with
            // the right pointer.
            let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
                && matches!(protector_kind, ProtectorKind::WeakProtector);
            if !allowed {
                return Err(dcx.protector_error(item, protector_kind)).into();
            }
        }
        interp_ok(())
    }

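    // Example of UB that protectors catch (editorial sketch, assuming `FnEntry` retags):
    //
    //     fn evil(x: &mut i32, p: *mut i32) {
    //         // entering the function strongly protects `x`'s tag
    //         unsafe { *p = 0 }; // pops the protected `Unique` if `p` aliases `x`: UB
    //     }
    //     let mut v = 0;
    //     let p = &raw mut v;
    //     evil(unsafe { &mut *p }, p);
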
    /// Test if a memory `access` using pointer tagged `tag` is granted.
    /// If yes, return the index of the item that granted it.
    /// `range` refers to the entire operation, and `offset` refers to the specific offset into the
    /// allocation that we are currently checking.
    fn access(
        &mut self,
        access: AccessKind,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Two main steps: Find granting item, remove incompatible items above.

        // Step 1: Find granting item.
        let granting_idx =
            self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;

        // Step 2: Remove incompatible items above them. Make sure we do not remove protected
        // items. Behavior differs for reads and writes.
        // In case of wildcards/unknown matches, we remove everything that is *definitely* gone.
        if access == AccessKind::Write {
            // Remove everything above the write-compatible items, like a proper stack. This makes sure read-only and unique
            // pointers become invalid on write accesses (ensures F2a, and ensures U2 for write accesses).
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would remove more
                // things. Even if this is a Unique and the lower idx is an SRW (which removes
                // less), there is an SRW group boundary here so strictly more would get removed.
                self.find_first_write_incompatible(granting_idx)
            } else {
                // We are writing to something in the unknown part.
                // There is a SRW group boundary between the unknown and the known, so everything is incompatible.
                0
            };
            self.pop_items_after(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        } else {
            // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
            // The reason this is not following the stack discipline (by removing the first Unique and
            // everything on top of it) is that in `let raw = &mut *x as *mut _; let _val = *x;`, the second statement
            // would pop the `Unique` from the reborrow of the first statement, and subsequently also pop the
            // `SharedReadWrite` for `raw`.
            // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
            // reference and use that.
            // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
            let first_incompatible_idx = if let Some(granting_idx) = granting_idx {
                // The granting_idx *might* be approximate, but any lower idx would disable more things.
                granting_idx + 1
            } else {
                // We are reading from something in the unknown part. That means *all* `Unique` we know about are dead now.
                0
            };
            self.disable_uniques_starting_at(first_incompatible_idx, |item| {
                Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
                dcx.log_invalidation(item.tag());
                interp_ok(())
            })?;
        }

        // If this was an approximate action, we now collapse everything into an unknown.
        if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
            // Compute the upper bound of the items that remain.
            // (This is why we did all the work above: to reduce the items we have to consider here.)
            let mut max = BorTag::one();
            for i in 0..self.len() {
                let item = self.get(i).unwrap();
                // Skip disabled items, they cannot be matched anyway.
                if !matches!(item.perm(), Permission::Disabled) {
                    // We are looking for a strict upper bound, so add 1 to this tag.
                    max = cmp::max(item.tag().succ().unwrap(), max);
                }
            }
            if let Some(unk) = self.unknown_bottom() {
                max = cmp::max(unk, max);
            }
            // Use `max` as new strict upper bound for everything.
            trace!(
                "access: forgetting stack to upper bound {max} due to wildcard or unknown access",
                max = max.get(),
            );
            self.set_unknown_bottom(max);
        }

        // Done.
        interp_ok(())
    }

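    // The read branch above is exactly what keeps this ubiquitous pattern legal
    // (editorial sketch of the pattern named in the comment):
    //
    //     let x = &mut 0i32;
    //     let raw = &mut *x as *mut i32; // stack: .., Unique(x), Unique(tmp), SRW(raw)
    //     let _val = *x;                 // read via `x`: Unique(tmp) is *disabled*,
    //                                    // not popped, so SRW(raw) stays in the stack
    //     unsafe { *raw = 1 };           // still allowed
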
    /// Deallocate a location: Like a write access, but also there must be no
    /// active protectors at all because we will remove all items.
    fn dealloc(
        &mut self,
        tag: ProvenanceExtra,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        // Step 1: Make a write access.
        // As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
        self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;

        // Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
        for idx in (0..self.len()).rev() {
            let item = self.get(idx).unwrap();
            Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
        }

        interp_ok(())
    }

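    // Why step 2 matters (editorial sketch): a *strongly* protected item forbids
    // deallocation even through an otherwise write-capable pointer:
    //
    //     fn evil(x: &mut i32) {
    //         // `x` is strongly protected for the duration of this call.
    //         drop(unsafe { Box::from_raw(x as *mut i32) }); // deallocation: UB
    //     }
    //     evil(Box::leak(Box::new(0)));
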
    /// Derive a new pointer from one with the given tag.
    ///
    /// `access` indicates which kind of memory access this retag itself should correspond to.
    fn grant(
        &mut self,
        derived_from: ProvenanceExtra,
        new: Item,
        access: Option<AccessKind>,
        global: &GlobalStateInner,
        dcx: &mut DiagnosticCx<'_, '_, 'tcx>,
        exposed_tags: &FxHashSet<BorTag>,
    ) -> InterpResult<'tcx> {
        dcx.start_grant(new.perm());

        // Compute where to put the new item.
        // Either way, we ensure that we insert the new item in a way such that between
        // `derived_from` and the new one, there are only items *compatible with* `derived_from`.
        let new_idx = if let Some(access) = access {
            // Simple case: We are just a regular memory access, and then push our thing on top,
            // like a regular stack.
            // This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
            self.access(access, derived_from, global, dcx, exposed_tags)?;

            // We insert "as far up as possible": We know only compatible items are remaining
            // on top of `derived_from`, and we want the new item at the top so that we
            // get the strongest possible guarantees.
            // This ensures U1 and F1.
            self.len()
        } else {
            // The tricky case: creating a new SRW permission without actually being an access.
            assert!(new.perm() == Permission::SharedReadWrite);

            // First we figure out which item grants our parent (`derived_from`) this kind of access.
            // We use that to determine where to put the new item.
            let granting_idx = self
                .find_granting(AccessKind::Write, derived_from, exposed_tags)
                .map_err(|()| dcx.grant_error(self))?;

            let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from)
            else {
                // The parent is a wildcard pointer or matched the unknown bottom.
                // This is approximate. Nobody knows what happened, so forget everything.
                // The new thing is SRW anyway, so we cannot push it "on top of the unknown part"
                // (for all we know, it might join an SRW group inside the unknown).
                trace!(
                    "reborrow: forgetting stack entirely due to SharedReadWrite reborrow from wildcard or unknown"
                );
                self.set_unknown_bottom(global.next_ptr_tag);
                return interp_ok(());
            };

            // SharedReadWrite can coexist with "existing loans", meaning they don't act like a write
            // access. Instead of popping the stack, we insert the item at the place the stack would
            // be popped to (i.e., we insert it above all the write-compatible items).
            // This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
            self.find_first_write_incompatible(granting_idx)
        };

        // Put the new item there.
        trace!("reborrow: adding item {:?}", new);
        self.insert(new_idx, new);
        interp_ok(())
    }
}
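
// Example for the "tricky case" in `grant` (editorial sketch): creating a raw pointer
// is not an access, so its `SharedReadWrite` item is *inserted* right above its
// parent's write-compatible group rather than pushed on top of the whole stack:
//
//     let x = &mut 0i32;      // stack: .., Unique(x)
//     let s = &*x;            // stack: .., Unique(x), SRO(s)
//     let raw = &raw mut *x;  // SRW(raw) is inserted *below* SRO(s):
//                             // stack: .., Unique(x), SRW(raw), SRO(s)
//
// This is what preserves F2b: nothing that can write is ever added on top of a
// `SharedReadOnly`.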
// # Stacked Borrows Core End

/// Integration with the BorTag garbage collector
impl Stacks {
    pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
        for (_stack_range, stack) in self.stacks.iter_mut_all() {
            stack.retain(live_tags);
        }
        self.history.retain(live_tags);
    }
}

impl VisitProvenance for Stacks {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        for tag in self.exposed_tags.iter().copied() {
            visit(None, Some(tag));
        }
    }
}

/// Map per-stack operations to higher-level per-location-range operations.
impl<'tcx> Stacks {
    /// Creates a new stack with an initial tag. For diagnostic purposes, we also need to know
    /// the [`AllocId`] of the allocation this is associated with.
    fn new(
        size: Size,
        perm: Permission,
        tag: BorTag,
        id: AllocId,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let item = Item::new(tag, perm, false);
        let stack = Stack::new(item);

        Stacks {
            stacks: DedupRangeMap::new(size, stack),
            history: AllocHistory::new(id, item, machine),
            exposed_tags: FxHashSet::default(),
        }
    }

    /// Call `f` on every stack in the range.
    fn for_each(
        &mut self,
        range: AllocRange,
        mut dcx_builder: DiagnosticCxBuilder<'_, 'tcx>,
        mut f: impl FnMut(
            &mut Stack,
            &mut DiagnosticCx<'_, '_, 'tcx>,
            &mut FxHashSet<BorTag>,
        ) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        for (stack_range, stack) in self.stacks.iter_mut(range.start, range.size) {
            let mut dcx = dcx_builder.build(&mut self.history, Size::from_bytes(stack_range.start));
            f(stack, &mut dcx, &mut self.exposed_tags)?;
            dcx_builder = dcx.unbuild();
        }
        interp_ok(())
    }
}

/// Glue code to connect with Miri Machine Hooks
impl Stacks {
    pub fn new_allocation(
        id: AllocId,
        size: Size,
        state: &mut GlobalStateInner,
        kind: MemoryKind,
        machine: &MiriMachine<'_>,
    ) -> Self {
        let (base_tag, perm) = match kind {
            // New unique borrow. This tag is not accessible by the program,
            // so it will only ever be used when using the local directly (i.e.,
            // not through a pointer). That is, whenever we directly write to a local, this will pop
            // everything else off the stack, invalidating all previous pointers,
            // and in particular, *all* raw pointers.
            MemoryKind::Stack => (state.root_ptr_tag(id, machine), Permission::Unique),
            // Everything else is shared by default.
            _ => (state.root_ptr_tag(id, machine), Permission::SharedReadWrite),
        };
        Stacks::new(size, perm, base_tag, id, machine)
    }

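    // Consequence of the `Unique` base tag for stack locals (editorial sketch):
    //
    //     let mut x = 0i32;
    //     let p = &raw mut x;   // SRW(p) sits above the base `Unique`
    //     x = 1;                // direct write uses the base tag and pops SRW(p)
    //     unsafe { *p = 2 };    // UB under Stacked Borrows
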
    #[inline(always)]
    pub fn before_memory_read<'ecx, 'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &'ecx MiriMachine<'tcx>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'ecx,
    {
        trace!(
            "read access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::read(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_write<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        range: AllocRange,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!(
            "write access with tag {:?}: {:?}, size {}",
            tag,
            interpret::Pointer::new(alloc_id, range.start),
            range.size.bytes()
        );
        let dcx = DiagnosticCxBuilder::write(machine, tag, range);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(range, dcx, |stack, dcx, exposed_tags| {
            stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
        })
    }

    #[inline(always)]
    pub fn before_memory_deallocation<'tcx>(
        &mut self,
        alloc_id: AllocId,
        tag: ProvenanceExtra,
        size: Size,
        machine: &MiriMachine<'tcx>,
    ) -> InterpResult<'tcx> {
        trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, size.bytes());
        let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
        let state = machine.borrow_tracker.as_ref().unwrap().borrow();
        self.for_each(alloc_range(Size::ZERO, size), dcx, |stack, dcx, exposed_tags| {
            stack.dealloc(tag, &state, dcx, exposed_tags)
        })?;
        interp_ok(())
    }
}

/// Retagging/reborrowing. There is some policy in here, such as which permissions
/// to grant for which references, and when to add protectors.
impl<'tcx, 'ecx> EvalContextPrivExt<'tcx, 'ecx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextPrivExt<'tcx, 'ecx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the provenance that should be used henceforth.
    fn sb_reborrow(
        &mut self,
        place: &MPlaceTy<'tcx>,
        size: Size,
        new_perm: NewPermission,
        new_tag: BorTag,
        retag_info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, Option<Provenance>> {
        let this = self.eval_context_mut();
        // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050).
        this.check_ptr_access(place.ptr(), size, CheckInAllocMsg::Dereferenceable)?;

        // It is crucial that this gets called on all code paths, to ensure we track tag creation.
        let log_creation = |this: &MiriInterpCx<'tcx>,
                            loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
         -> InterpResult<'tcx> {
            let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
            let ty = place.layout.ty;
            if global.tracked_pointer_tags.contains(&new_tag) {
                let mut kind_str = String::new();
                match new_perm {
                    NewPermission::Uniform { perm, .. } =>
                        write!(kind_str, "{perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, .. } if ty.is_freeze(*this.tcx, this.typing_env()) =>
                        write!(kind_str, "{freeze_perm:?} permission").unwrap(),
                    NewPermission::FreezeSensitive { freeze_perm, nonfreeze_perm, .. } =>
                        write!(kind_str, "{freeze_perm:?}/{nonfreeze_perm:?} permission for frozen/non-frozen parts").unwrap(),
                }
                write!(kind_str, " (pointee type {ty})").unwrap();
                this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
                    new_tag.inner(),
                    Some(kind_str),
                    loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
                ));
            }
            drop(global); // don't hold that reference any longer than we have to

            let Some((alloc_id, base_offset, orig_tag)) = loc else {
                return interp_ok(());
            };

            let alloc_kind = this.get_alloc_info(alloc_id).kind;
            match alloc_kind {
                AllocKind::LiveData => {
                    // This should have alloc_extra data, but `get_alloc_extra` can still fail
                    // if converting this alloc_id from a global to a local one
                    // uncovers a non-supported `extern static`.
                    let extra = this.get_alloc_extra(alloc_id)?;
                    let mut stacked_borrows = extra.borrow_tracker_sb().borrow_mut();
                    // Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
                    // FIXME: can this be done cleaner?
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
                    dcx.log_creation();
                    if new_perm.protector().is_some() {
                        dcx.log_protector();
                    }
                }
                AllocKind::Function
                | AllocKind::VTable
                | AllocKind::TypeId
                | AllocKind::Dead
                | AllocKind::VaList => {
                    // No stacked borrows on these allocations.
                }
            }
            interp_ok(())
        };

        if size == Size::ZERO {
            trace!(
                "reborrow of size 0: reference {:?} derived from {:?} (pointee {}) with permissions {new_perm:?}",
                new_tag,
                place.ptr(),
                place.layout.ty,
            );
            // Don't update any stacks for a zero-sized access; borrow stacks are per-byte and this
            // touches no bytes so there is no stack to put this tag in.
            // However, if the pointer for this operation points at a real allocation we still
            // record where it was created so that we can issue a helpful diagnostic if there is an
            // attempt to use it for a non-zero-sized access.
            // Dangling slices are a common case here; it's valid to get their length but with raw
            // pointer tagging for example all calls to get_unchecked on them are invalid.
            if let Ok((alloc_id, base_offset, orig_tag)) = this.ptr_try_get_alloc_id(place.ptr(), 0)
            {
                log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;
                // Still give it the new provenance, it got retagged after all. If this was a
                // wildcard pointer, this will fix the AllocId and make future accesses with this
                // reference to other allocations UB, but that's fine: due to subobject provenance,
                // *all* future accesses with this reference should be UB!
                return interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }));
            } else {
                // This pointer doesn't come with an AllocId. :shrug:
                log_creation(this, None)?;
                // Provenance unchanged. Ideally we'd make this pointer UB to use like above,
                // but there's no easy way to do that.
                return interp_ok(place.ptr().provenance);
            }
        }

        // The pointer *must* have a valid AllocId to continue, so we want to resolve this to
        // a concrete ID even for wildcard pointers.
        let (alloc_id, base_offset, orig_tag) = this.ptr_get_alloc_id(place.ptr(), 0)?;
        log_creation(this, Some((alloc_id, base_offset, orig_tag)))?;

        trace!(
            "reborrow: reference {:?} derived from {:?} (pointee {}) with permissions {new_perm:?}: {:?}, size {}",
            new_tag,
            orig_tag,
            place.layout.ty,
            interpret::Pointer::new(alloc_id, base_offset),
            size.bytes()
        );

        if let Some(protect) = new_perm.protector() {
            // See comment in `Stack::item_invalidated` for why we store the tag twice.
            this.frame_mut()
                .extra
                .borrow_tracker
                .as_mut()
                .unwrap()
                .protected_tags
                .push((alloc_id, new_tag));
            this.machine
                .borrow_tracker
                .as_mut()
                .unwrap()
                .get_mut()
                .protected_tags
                .insert(new_tag, protect);
        }

        // Update the stacks, according to the new permission information we are given.
        match new_perm {
            NewPermission::Uniform { perm, access, protector } => {
                assert!(perm != Permission::SharedReadOnly);
                // Here we can avoid `borrow()` calls because we have mutable references.
                // Note that this asserts that the allocation is mutable -- but since we are creating a
                // mutable pointer, that seems reasonable.
                let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
                let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
                let item = Item::new(new_tag, perm, protector.is_some());
                let range = alloc_range(base_offset, size);
                let global = machine.borrow_tracker.as_ref().unwrap().borrow();
                let dcx = DiagnosticCxBuilder::retag(
                    machine,
                    retag_info,
                    new_tag,
                    orig_tag,
                    alloc_range(base_offset, size),
                );
                stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                    stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                })?;
                drop(global);
                if let Some(access) = access {
                    assert_eq!(access, AccessKind::Write);
                    // Make sure the data race model also knows about this.
                    // FIXME(genmc): Ensure this is still done in GenMC mode. Check for other places where GenMC may need to be informed.
                    if let Some(data_race) = alloc_extra.data_race.as_vclocks_mut() {
                        data_race.write_non_atomic(
                            alloc_id,
                            range,
                            NaWriteType::Retag,
                            Some(place.layout.ty),
                            machine,
                        )?;
                    }
                }
            }
            NewPermission::FreezeSensitive {
                freeze_perm,
                freeze_access,
                freeze_protector,
                nonfreeze_perm,
                nonfreeze_access,
            } => {
                // The permission is not uniform across the entire range!
                // We need a frozen-sensitive reborrow.
                // We have to use shared references to alloc/memory_extra here since
                // `visit_freeze_sensitive` needs to access the global state.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
                this.visit_freeze_sensitive(place, size, |mut range, frozen| {
                    // Adjust range.
                    range.start += base_offset;
                    // We are only ever `SharedReadOnly` inside the frozen bits.
                    let (perm, access, protector) = if frozen {
                        (freeze_perm, freeze_access, freeze_protector)
                    } else {
                        (nonfreeze_perm, nonfreeze_access, None)
                    };
                    let item = Item::new(new_tag, perm, protector.is_some());
                    let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
                    let dcx = DiagnosticCxBuilder::retag(
                        &this.machine,
                        retag_info,
                        new_tag,
                        orig_tag,
                        alloc_range(base_offset, size),
                    );
                    stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
                        stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
                    })?;
                    drop(global);
                    if let Some(access) = access {
                        assert_eq!(access, AccessKind::Read);
                        // Make sure the data race model also knows about this.
                        if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() {
                            data_race.read_non_atomic(
                                alloc_id,
                                range,
                                NaReadType::Retag,
                                Some(place.layout.ty),
                                &this.machine,
                            )?;
                        }
                    }
                    interp_ok(())
                })?;
            }
        }

        interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag }))
    }

    fn sb_retag_place(
        &mut self,
        place: &MPlaceTy<'tcx>,
        new_perm: NewPermission,
        info: RetagInfo, // diagnostics info about this retag
    ) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();
        let size = this.size_and_align_of_val(place)?.map(|(size, _)| size);
        // FIXME: If we cannot determine the size (because the unsized tail is an `extern type`),
        // bail out -- we cannot reasonably figure out which memory range to reborrow.
        // See https://github.com/rust-lang/unsafe-code-guidelines/issues/276.
        let Some(size) = size else {
            static DEDUP: AtomicBool = AtomicBool::new(false);
            if !DEDUP.swap(true, std::sync::atomic::Ordering::Relaxed) {
                this.emit_diagnostic(NonHaltingDiagnostic::ExternTypeReborrow);
            }
            return interp_ok(place.clone());
        };

        // Compute new borrow.
        let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();

        // Reborrow.
        let new_prov = this.sb_reborrow(place, size, new_perm, new_tag, info)?;

        // Adjust place.
        // (If the closure gets called, that means the old provenance was `Some`, and hence the new
        // one must also be `Some`.)
        interp_ok(place.clone().map_provenance(|_| new_prov.unwrap()))
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn sb_retag_ptr_value(
        &mut self,
        val: &ImmTy<'tcx>,
        ty: Ty<'tcx>,
        mode: RetagMode,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx>>> {
        let this = self.eval_context_mut();
        let cause = match mode {
            RetagMode::TwoPhase => RetagCause::TwoPhase,
            RetagMode::FnEntry => RetagCause::FnEntry,
            RetagMode::Raw | RetagMode::Default => RetagCause::Normal,
            RetagMode::None => return interp_ok(None), // no retagging
        };
        let new_perm = if ty.is_box() {
            if ty.is_box_global(*this.tcx) {
                NewPermission::from_box_ty(val.layout.ty, mode, this)
            } else {
                // Boxes with local allocator are not retagged.
                return interp_ok(None);
            }
        } else {
            NewPermission::from_ref_ty(val.layout.ty, mode, this)
        };

        let info = RetagInfo { cause };
        let place = this.imm_ptr_to_mplace(val)?;
        let new_place = this.sb_retag_place(&place, new_perm, info)?;
        interp_ok(Some(ImmTy::from_immediate(new_place.to_ref(this), val.layout)))
    }

    /// Protect a place so that it cannot be used any more for the duration of the current function
    /// call.
    ///
    /// This is used to ensure soundness of in-place function argument/return passing.
    fn sb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
        let this = self.eval_context_mut();

        // Retag it. With protection! That is the entire point.
        let new_perm = NewPermission::Uniform {
            perm: Permission::Unique,
            access: Some(AccessKind::Write),
            protector: Some(ProtectorKind::StrongProtector),
        };
        this.sb_retag_place(place, new_perm, RetagInfo { cause: RetagCause::InPlaceFnPassing })
    }

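    // What this guards against (editorial sketch): with in-place passing, the caller's
    // old pointers to the argument's memory must become unusable for the whole call:
    //
    //     fn callee(s: String) { /* `s`'s memory is strongly protected here */ }
    //     // Any access through a leftover caller-side pointer to that memory while
    //     // `callee` runs pops the protected `Unique` created above -- immediate UB.
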
    /// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
    fn sb_expose_tag(&self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
        let this = self.eval_context_ref();

        // Function pointers and dead objects don't have an alloc_extra so we ignore them.
        // This is okay because accessing them is UB anyway, no need for any Stacked Borrows checks.
        // NOT using `get_alloc_extra_mut` since this might be a read-only allocation!
        let kind = this.get_alloc_info(alloc_id).kind;
        match kind {
            AllocKind::LiveData => {
                // This should have alloc_extra data, but `get_alloc_extra` can still fail
                // if converting this alloc_id from a global to a local one
                // uncovers a non-supported `extern static`.
                let alloc_extra = this.get_alloc_extra(alloc_id)?;
                trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
                alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
            }
            AllocKind::Function
            | AllocKind::VTable
            | AllocKind::TypeId
            | AllocKind::Dead
            | AllocKind::VaList => {
                // No stacked borrows on these allocations.
            }
        }
        interp_ok(())
    }

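    // When tags get exposed (editorial sketch): a pointer-to-integer cast exposes the
    // tag, and a later integer-to-pointer cast yields a wildcard pointer that accesses
    // are then checked against the exposed set:
    //
    //     let mut x = 0u8;
    //     let addr = &mut x as *mut u8 as usize; // exposes the reference's tag
    //     let p = addr as *mut u8;               // wildcard provenance
    //     unsafe { *p = 1 };                     // OK iff some exposed tag grants it
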
    fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let alloc_extra = this.get_alloc_extra(alloc_id)?;
        let stacks = alloc_extra.borrow_tracker_sb().borrow();
        for (range, stack) in stacks.stacks.iter_all() {
            print!("{range:?}: [");
            if let Some(bottom) = stack.unknown_bottom() {
                print!(" unknown-bottom(..{bottom:?})");
            }
            for i in 0..stack.len() {
                let item = stack.get(i).unwrap();
                print!(" {:?}{:?}", item.perm(), item.tag());
            }
            println!(" ]");
        }
        interp_ok(())
    }
}
949}