Skip to main content

miri/concurrency/
thread.rs

1//! Implements threads.
2
3use std::sync::atomic::Ordering::Relaxed;
4use std::task::Poll;
5use std::time::{Duration, SystemTime};
6use std::{io, mem};
7
8use rand::seq::IteratorRandom;
9use rustc_abi::ExternAbi;
10use rustc_const_eval::CTRL_C_RECEIVED;
11use rustc_data_structures::either::Either;
12use rustc_data_structures::fx::FxHashMap;
13use rustc_hir::def_id::DefId;
14use rustc_index::{Idx, IndexVec};
15use rustc_middle::mir::Mutability;
16use rustc_middle::ty::layout::TyAndLayout;
17use rustc_span::{DUMMY_SP, Span};
18use rustc_target::spec::Os;
19
20use crate::concurrency::GlobalDataRaceHandler;
21use crate::concurrency::blocking_io::InterestReceiver;
22use crate::shims::tls;
23use crate::*;
24
/// What the scheduler decided should happen next.
#[derive(Clone, Copy, Debug, PartialEq)]
enum SchedulingAction {
    /// Let the active thread execute one step.
    ExecuteStep,
    /// Sleep for a while, but no longer than the given duration; wake up early
    /// if an I/O event arrives. A duration of [`None`] means sleeping without a
    /// deadline, which is only permitted when isolation is disabled and there
    /// are threads waiting for I/O!
    SleepAndWaitForIo(Option<Duration>),
}
35
/// Policy for handling the TLS allocations of a terminated thread.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TlsAllocAction {
    /// Free the backing memory of thread-local statics, as normally happens.
    Deallocate,
    /// Do not deallocate the backing memory of thread-local statics; instead,
    /// treat all memory reachable from them as "allowed to leak", just like
    /// global `static`s.
    Leak,
}
45
/// Passed to "unblock" callbacks to say why the thread got woken up.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum UnblockKind {
    /// The operation finished successfully; the thread resumes normal execution.
    Ready,
    /// The operation's specified duration elapsed before it could complete.
    TimedOut,
}
54
/// Type alias for unblock callbacks, i.e. machine callbacks invoked when
/// a thread gets unblocked. The [`UnblockKind`] argument tells the callback
/// whether the operation completed or timed out.
pub type DynUnblockCallback<'tcx> = DynMachineCallback<'tcx, UnblockKind>;
58
/// A thread identifier.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);

impl ThreadId {
    /// The id of the main thread, which always exists.
    pub const MAIN_THREAD: ThreadId = ThreadId(0);

    /// Extract the raw `u32` value of this thread id.
    pub fn to_u32(self) -> u32 {
        let ThreadId(raw) = self;
        raw
    }

    /// Create a new thread id from a `u32` without checking if this thread exists.
    pub fn new_unchecked(id: u32) -> Self {
        ThreadId(id)
    }
}
75
76impl Idx for ThreadId {
77    fn new(idx: usize) -> Self {
78        ThreadId(u32::try_from(idx).unwrap())
79    }
80
81    fn index(self) -> usize {
82        usize::try_from(self.0).unwrap()
83    }
84}
85
86impl From<ThreadId> for u64 {
87    fn from(t: ThreadId) -> Self {
88        t.0.into()
89    }
90}
91
/// Keeps track of what the thread is blocked on.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BlockReason {
    /// The thread tried to join the specified thread and is blocked until that
    /// thread terminates.
    Join(ThreadId),
    /// Waiting for time to pass.
    Sleep,
    /// Blocked on a mutex.
    Mutex,
    /// Blocked on a condition variable.
    Condvar,
    /// Blocked on a reader-writer lock.
    RwLock,
    /// Blocked on a Futex variable.
    Futex,
    /// Blocked on an InitOnce.
    InitOnce,
    /// Blocked on epoll.
    Epoll,
    /// Blocked on eventfd.
    Eventfd,
    /// Blocked on a virtual socket.
    VirtualSocket,
    /// Blocked on a host I/O operation.
    IO,
    /// Blocked for any reason related to GenMC, such as `assume` statements (GenMC mode only).
    /// Will be implicitly unblocked when GenMC schedules this thread again.
    Genmc,
}
122
/// The scheduling state of a thread.
enum ThreadState<'tcx> {
    /// The thread is enabled and can be executed.
    Enabled,
    /// The thread is blocked on something, with an optional timeout and a callback
    /// that is invoked (with an [`UnblockKind`]) when the thread gets unblocked.
    Blocked { reason: BlockReason, timeout: Option<Timeout>, callback: DynUnblockCallback<'tcx> },
    /// The thread has terminated its execution. We do not delete terminated
    /// threads (FIXME: why?).
    Terminated,
}
133
134impl<'tcx> std::fmt::Debug for ThreadState<'tcx> {
135    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
136        match self {
137            Self::Enabled => write!(f, "Enabled"),
138            Self::Blocked { reason, timeout, .. } =>
139                f.debug_struct("Blocked").field("reason", reason).field("timeout", timeout).finish(),
140            Self::Terminated => write!(f, "Terminated"),
141        }
142    }
143}
144
145impl<'tcx> ThreadState<'tcx> {
146    fn is_enabled(&self) -> bool {
147        matches!(self, ThreadState::Enabled)
148    }
149
150    fn is_terminated(&self) -> bool {
151        matches!(self, ThreadState::Terminated)
152    }
153
154    fn is_blocked_on(&self, reason: &BlockReason) -> bool {
155        matches!(self, ThreadState::Blocked { reason: actual_reason, .. } if actual_reason == reason)
156    }
157}
158
/// Tracks whether and how a thread may still be joined.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ThreadJoinStatus {
    /// The thread can be joined.
    Joinable,
    /// The thread is detached: its join handle was destroyed, so no other
    /// thread can ever join it.
    Detached,
    /// Some thread already joined this one; it cannot be joined again.
    Joined,
}
170
/// A thread.
pub struct Thread<'tcx> {
    /// Current scheduling state: enabled, blocked, or terminated.
    state: ThreadState<'tcx>,

    /// Name of the thread, if one was set.
    thread_name: Option<Vec<u8>>,

    /// The virtual call stack.
    stack: Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>>,

    /// A span that explains where the thread (or more specifically, its current root
    /// frame) "comes from".
    pub(crate) origin_span: Span,

    /// The function to call when the stack ran empty, to figure out what to do next.
    /// Conceptually, this is the interpreter implementation of the things that happen 'after' the
    /// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
    /// (`None` is an error, it means the callback has not been set up yet or is actively running.)
    pub(crate) on_stack_empty: Option<StackEmptyCallback<'tcx>>,

    /// The index of the topmost user-relevant frame in `stack`. This field must contain
    /// the value produced by `get_top_user_relevant_frame`.
    /// This field is a cache to reduce how often we call that method. The cache is manually
    /// maintained inside `MiriMachine::after_stack_push` and `MiriMachine::after_stack_pop`.
    top_user_relevant_frame: Option<usize>,

    /// The join status (joinable, detached, or already joined).
    join_status: ThreadJoinStatus,

    /// Stack of active unwind payloads for the current thread. Used for storing
    /// the argument of the call to `miri_start_unwind` (the payload) when unwinding.
    /// This is pointer-sized, and matches the `Payload` type in `src/libpanic_unwind/miri.rs`.
    ///
    /// In real unwinding, the payload gets passed as an argument to the landing pad,
    /// which then forwards it to 'Resume'. However this argument is implicit in MIR,
    /// so we have to store it out-of-band. When there are multiple active unwinds,
    /// the innermost one is always caught first, so we can store them as a stack.
    pub(crate) unwind_payloads: Vec<ImmTy<'tcx>>,

    /// Last OS error location in memory. It is a 32-bit integer.
    pub(crate) last_error: Option<MPlaceTy<'tcx>>,
}
213
/// Callback invoked whenever a thread's stack becomes empty, to decide what the thread
/// does next (stored in `Thread::on_stack_empty`). Returns `Poll::Ready(())` once the
/// thread is completely done.
pub type StackEmptyCallback<'tcx> =
    Box<dyn FnMut(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Poll<()>> + 'tcx>;
216
217impl<'tcx> Thread<'tcx> {
218    /// Get the name of the current thread if it was set.
219    fn thread_name(&self) -> Option<&[u8]> {
220        self.thread_name.as_deref()
221    }
222
223    /// Return whether this thread is enabled or not.
224    pub fn is_enabled(&self) -> bool {
225        self.state.is_enabled()
226    }
227
228    /// Get the name of the current thread for display purposes; will include thread ID if not set.
229    fn thread_display_name(&self, id: ThreadId) -> String {
230        if let Some(ref thread_name) = self.thread_name {
231            String::from_utf8_lossy(thread_name).into_owned()
232        } else {
233            format!("unnamed-{}", id.index())
234        }
235    }
236
237    /// Return the top user-relevant frame, if there is one. `skip` indicates how many top frames
238    /// should be skipped.
239    /// Note that the choice to return `None` here when there is no user-relevant frame is part of
240    /// justifying the optimization that only pushes of user-relevant frames require updating the
241    /// `top_user_relevant_frame` field.
242    fn compute_top_user_relevant_frame(&self, skip: usize) -> Option<usize> {
243        // We are search for the frame with maximum relevance.
244        let mut best = None;
245        for (idx, frame) in self.stack.iter().enumerate().rev().skip(skip) {
246            let relevance = frame.extra.user_relevance;
247            if relevance == u8::MAX {
248                // We can short-circuit this search.
249                return Some(idx);
250            }
251            if best.is_none_or(|(_best_idx, best_relevance)| best_relevance < relevance) {
252                // The previous best frame has strictly worse relevance, so despite us being lower
253                // in the stack, we win.
254                best = Some((idx, relevance));
255            }
256        }
257        best.map(|(idx, _relevance)| idx)
258    }
259
260    /// Re-compute the top user-relevant frame from scratch. `skip` indicates how many top frames
261    /// should be skipped.
262    pub fn recompute_top_user_relevant_frame(&mut self, skip: usize) {
263        self.top_user_relevant_frame = self.compute_top_user_relevant_frame(skip);
264    }
265
266    /// Set the top user-relevant frame to the given value. Must be equal to what
267    /// `get_top_user_relevant_frame` would return!
268    pub fn set_top_user_relevant_frame(&mut self, frame_idx: usize) {
269        debug_assert_eq!(Some(frame_idx), self.compute_top_user_relevant_frame(0));
270        self.top_user_relevant_frame = Some(frame_idx);
271    }
272
273    /// Returns the topmost frame that is considered user-relevant, or the
274    /// top of the stack if there is no such frame, or `None` if the stack is empty.
275    pub fn top_user_relevant_frame(&self) -> Option<usize> {
276        // This can be called upon creation of an allocation. We create allocations while setting up
277        // parts of the Rust runtime when we do not have any stack frames yet, so we need to handle
278        // empty stacks.
279        self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
280    }
281
282    pub fn current_user_relevance(&self) -> u8 {
283        self.top_user_relevant_frame()
284            .map(|frame_idx| self.stack[frame_idx].extra.user_relevance)
285            .unwrap_or(0)
286    }
287
288    pub fn current_user_relevant_span(&self) -> Span {
289        debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame(0));
290        self.top_user_relevant_frame()
291            .map(|frame_idx| self.stack[frame_idx].current_span())
292            .unwrap_or(rustc_span::DUMMY_SP)
293    }
294}
295
296impl<'tcx> std::fmt::Debug for Thread<'tcx> {
297    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
298        write!(
299            f,
300            "{}({:?}, {:?})",
301            String::from_utf8_lossy(self.thread_name().unwrap_or(b"<unnamed>")),
302            self.state,
303            self.join_status
304        )
305    }
306}
307
308impl<'tcx> Thread<'tcx> {
309    fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'tcx>>) -> Self {
310        Self {
311            state: ThreadState::Enabled,
312            thread_name: name.map(|name| Vec::from(name.as_bytes())),
313            stack: Vec::new(),
314            origin_span: DUMMY_SP,
315            top_user_relevant_frame: None,
316            join_status: ThreadJoinStatus::Joinable,
317            unwind_payloads: Vec::new(),
318            last_error: None,
319            on_stack_empty,
320        }
321    }
322}
323
impl VisitProvenance for Thread<'_> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        // Destructure exhaustively so that adding a field to `Thread` forces a decision
        // on whether it needs to be visited for provenance.
        let Thread {
            unwind_payloads: panic_payload,
            last_error,
            stack,
            origin_span: _,
            top_user_relevant_frame: _,
            state: _,
            thread_name: _,
            join_status: _,
            on_stack_empty: _, // we assume the closure captures no GC-relevant state
        } = self;

        // Unwind payloads are interpreter values and may carry provenance.
        for payload in panic_payload {
            payload.visit_provenance(visit);
        }
        last_error.visit_provenance(visit);
        // Every frame on the virtual call stack may hold provenance in its locals.
        for frame in stack {
            frame.visit_provenance(visit)
        }
    }
}
347
impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> {
    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
        let return_place = self.return_place();
        let Frame {
            locals,
            extra,
            // There are some private fields we cannot access; they contain no tags.
            ..
        } = self;

        // Return place.
        return_place.visit_provenance(visit);
        // Locals: each can be backed by a memory place (pointer + metadata)
        // or held as an immediate value.
        for local in locals.iter() {
            match local.as_mplace_or_imm() {
                // Nothing stored for this local; nothing to visit.
                None => {}
                Some(Either::Left((ptr, meta))) => {
                    ptr.visit_provenance(visit);
                    meta.visit_provenance(visit);
                }
                Some(Either::Right(imm)) => {
                    imm.visit_provenance(visit);
                }
            }
        }

        // Miri-specific per-frame state.
        extra.visit_provenance(visit);
    }
}
377
/// The moment in time when a blocked thread should be woken up.
#[derive(Debug)]
enum Timeout {
    /// An absolute instant on the monotonic clock.
    Monotonic(Instant),
    /// An absolute point in real (wall-clock) time, which may jump around.
    RealTime(SystemTime),
}
384
impl Timeout {
    /// How long do we have to wait from now until the specified time?
    fn get_wait_time(&self, clock: &MonotonicClock) -> Duration {
        match self {
            Timeout::Monotonic(instant) => instant.duration_since(clock.now()),
            // The real-time deadline may already lie in the past (the wall clock can
            // move around), in which case we do not wait at all.
            Timeout::RealTime(time) =>
                time.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO),
        }
    }

    /// Will try to add `duration`, but if that overflows it may add less.
    fn add_lossy(&self, duration: Duration) -> Self {
        match self {
            Timeout::Monotonic(i) => Timeout::Monotonic(i.add_lossy(duration)),
            Timeout::RealTime(s) => {
                // If this overflows, try adding just 1h and assume that will not overflow.
                Timeout::RealTime(
                    s.checked_add(duration)
                        .unwrap_or_else(|| s.checked_add(Duration::from_secs(3600)).unwrap()),
                )
            }
        }
    }
}
409
/// The clock to use for the timeout you are asking for.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum TimeoutClock {
    /// Measure the timeout against the monotonic clock.
    Monotonic,
    /// Measure the timeout against real (wall-clock) time.
    RealTime,
}
416
/// Whether the timeout is relative or absolute.
#[derive(Debug, Copy, Clone)]
pub enum TimeoutAnchor {
    /// The timeout is a duration measured from now.
    Relative,
    /// The timeout is an absolute point in time.
    Absolute,
}
423
/// An error signaling that the requested thread doesn't exist or has terminated.
/// Returned by `ThreadManager::thread_id_try_from`.
#[derive(Debug, Copy, Clone)]
pub enum ThreadLookupError {
    /// No thread with this ID exists.
    InvalidId,
    /// The thread exists but has already terminated.
    Terminated(ThreadId),
}
432
/// A set of threads.
#[derive(Debug)]
pub struct ThreadManager<'tcx> {
    /// Identifier of the currently active thread.
    active_thread: ThreadId,
    /// Threads used in the program.
    ///
    /// Note that this vector also contains terminated threads.
    threads: IndexVec<ThreadId, Thread<'tcx>>,
    /// A mapping from a thread-local static to the thread specific allocation.
    thread_local_allocs: FxHashMap<(DefId, ThreadId), StrictPointer>,
    /// A flag that indicates that we should change the active thread.
    /// Completely ignored in GenMC mode.
    yield_active_thread: bool,
    /// If set, threads are scheduled round-robin; otherwise, scheduling is randomized.
    fixed_scheduling: bool,
}
450
451impl VisitProvenance for ThreadManager<'_> {
452    fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
453        let ThreadManager {
454            threads,
455            thread_local_allocs,
456            active_thread: _,
457            yield_active_thread: _,
458            fixed_scheduling: _,
459        } = self;
460
461        for thread in threads {
462            thread.visit_provenance(visit);
463        }
464        for ptr in thread_local_allocs.values() {
465            ptr.visit_provenance(visit);
466        }
467    }
468}
469
impl<'tcx> ThreadManager<'tcx> {
    /// Create a fresh thread manager containing only the main thread.
    pub(crate) fn new(config: &MiriConfig) -> Self {
        let mut threads = IndexVec::new();
        // Create the main thread and add it to the list of threads.
        threads.push(Thread::new(Some("main"), None));
        Self {
            active_thread: ThreadId::MAIN_THREAD,
            threads,
            thread_local_allocs: Default::default(),
            yield_active_thread: false,
            fixed_scheduling: config.fixed_scheduling,
        }
    }

    /// Finish initialization: install the main thread's `on_stack_empty` callback,
    /// and set the main thread's join status for the target OS.
    pub(crate) fn init(
        ecx: &mut MiriInterpCx<'tcx>,
        on_main_stack_empty: StackEmptyCallback<'tcx>,
    ) {
        ecx.machine.threads.threads[ThreadId::MAIN_THREAD].on_stack_empty =
            Some(on_main_stack_empty);
        if ecx.tcx.sess.target.os != Os::Windows {
            // The main thread can *not* be joined on except on windows.
            ecx.machine.threads.threads[ThreadId::MAIN_THREAD].join_status =
                ThreadJoinStatus::Detached;
        }
    }

    /// Returns the `ThreadId` for the given raw thread id.
    /// Returns `Err(ThreadLookupError::InvalidId)` if the id is out of range, or
    /// `Err(ThreadLookupError::Terminated(id))` if the thread exists but has terminated.
    pub fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadLookupError> {
        if let Ok(id) = id.try_into()
            && usize::try_from(id).is_ok_and(|id| id < self.threads.len())
        {
            let thread_id = ThreadId(id);
            if self.threads[thread_id].state.is_terminated() {
                Err(ThreadLookupError::Terminated(thread_id))
            } else {
                Ok(thread_id)
            }
        } else {
            Err(ThreadLookupError::InvalidId)
        }
    }

    /// Check if we have an allocation for the given thread local static for the
    /// active thread.
    fn get_thread_local_alloc_id(&self, def_id: DefId) -> Option<StrictPointer> {
        self.thread_local_allocs.get(&(def_id, self.active_thread)).cloned()
    }

    /// Set the pointer for the allocation of the given thread local
    /// static for the active thread.
    ///
    /// Panics if a thread local is initialized twice for the same thread.
    fn set_thread_local_alloc(&mut self, def_id: DefId, ptr: StrictPointer) {
        self.thread_local_allocs.try_insert((def_id, self.active_thread), ptr).unwrap();
    }

    /// Borrow the stack of the active thread.
    pub fn active_thread_stack(&self) -> &[Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
        &self.threads[self.active_thread].stack
    }

    /// Mutably borrow the stack of the active thread.
    pub fn active_thread_stack_mut(
        &mut self,
    ) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
        &mut self.threads[self.active_thread].stack
    }

    /// Iterate over the stacks of all currently blocked threads.
    pub fn all_blocked_stacks(
        &self,
    ) -> impl Iterator<Item = (ThreadId, &[Frame<'tcx, Provenance, FrameExtra<'tcx>>])> {
        self.threads
            .iter_enumerated()
            .filter(|(_id, t)| matches!(t.state, ThreadState::Blocked { .. }))
            .map(|(id, t)| (id, &t.stack[..]))
    }

    /// Create a new thread and returns its id.
    fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'tcx>) -> ThreadId {
        let new_thread_id = ThreadId::new(self.threads.len());
        self.threads.push(Thread::new(None, Some(on_stack_empty)));
        new_thread_id
    }

    /// Set an active thread and return the id of the thread that was active before.
    fn set_active_thread_id(&mut self, id: ThreadId) -> ThreadId {
        assert!(id.index() < self.threads.len());
        info!(
            "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
            self.get_thread_display_name(id),
            self.get_thread_display_name(self.active_thread)
        );
        std::mem::replace(&mut self.active_thread, id)
    }

    /// Get the id of the currently active thread.
    pub fn active_thread(&self) -> ThreadId {
        self.active_thread
    }

    /// Get the total number of threads that were ever spawn by this program.
    pub fn get_total_thread_count(&self) -> usize {
        self.threads.len()
    }

    /// Get the total of threads that are currently live, i.e., not yet terminated.
    /// (They might be blocked.)
    pub fn get_live_thread_count(&self) -> usize {
        self.threads.iter().filter(|t| !t.state.is_terminated()).count()
    }

    /// Has the given thread terminated?
    fn has_terminated(&self, thread_id: ThreadId) -> bool {
        self.threads[thread_id].state.is_terminated()
    }

    /// Have all threads terminated?
    fn have_all_terminated(&self) -> bool {
        self.threads.iter().all(|thread| thread.state.is_terminated())
    }

    /// Enable the thread for execution. The thread must be terminated.
    fn enable_thread(&mut self, thread_id: ThreadId) {
        assert!(self.has_terminated(thread_id));
        self.threads[thread_id].state = ThreadState::Enabled;
    }

    /// Get a mutable borrow of the currently active thread.
    pub fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
        &mut self.threads[self.active_thread]
    }

    /// Get a shared borrow of the currently active thread.
    pub fn active_thread_ref(&self) -> &Thread<'tcx> {
        &self.threads[self.active_thread]
    }

    /// Get a shared borrow of the given thread.
    pub fn thread_ref(&self, thread_id: ThreadId) -> &Thread<'tcx> {
        &self.threads[thread_id]
    }

    /// Mark the thread as detached, which means that no other thread will try
    /// to join it and the thread is responsible for cleaning up.
    ///
    /// `allow_terminated_joined` allows detaching joined threads that have already terminated.
    /// This matches Windows's behavior for `CloseHandle`.
    ///
    /// See <https://docs.microsoft.com/en-us/windows/win32/procthread/thread-handles-and-identifiers>:
    /// > The handle is valid until closed, even after the thread it represents has been terminated.
    fn detach_thread(&mut self, id: ThreadId, allow_terminated_joined: bool) -> InterpResult<'tcx> {
        // NOTE: In GenMC mode, we treat detached threads like regular threads that are never joined, so there is no special handling required here.
        trace!("detaching {:?}", id);

        let is_ub = if allow_terminated_joined && self.threads[id].state.is_terminated() {
            // "Detached" in particular means "not yet joined". Redundant detaching is still UB.
            self.threads[id].join_status == ThreadJoinStatus::Detached
        } else {
            self.threads[id].join_status != ThreadJoinStatus::Joinable
        };
        if is_ub {
            throw_ub_format!("trying to detach thread that was already detached or joined");
        }

        self.threads[id].join_status = ThreadJoinStatus::Detached;
        interp_ok(())
    }

    /// Set the name of the given thread.
    pub fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
        self.threads[thread].thread_name = Some(new_thread_name);
    }

    /// Get the name of the given thread.
    pub fn get_thread_name(&self, thread: ThreadId) -> Option<&[u8]> {
        self.threads[thread].thread_name()
    }

    /// Get a human-readable name for the given thread (includes the id if unnamed).
    pub fn get_thread_display_name(&self, thread: ThreadId) -> String {
        self.threads[thread].thread_display_name(thread)
    }

    /// Put the (currently active) thread into the blocked state.
    fn block_thread(
        &mut self,
        reason: BlockReason,
        timeout: Option<Timeout>,
        callback: DynUnblockCallback<'tcx>,
    ) {
        let state = &mut self.threads[self.active_thread].state;
        assert!(state.is_enabled());
        *state = ThreadState::Blocked { reason, timeout, callback }
    }

    /// Change the active thread to some enabled thread.
    fn yield_active_thread(&mut self) {
        // We do not yield immediately, as swapping out the current stack while executing a MIR statement
        // could lead to all sorts of confusion.
        // We should only switch stacks between steps.
        self.yield_active_thread = true;
    }
}
674
// Make the private scheduling helpers below available on the interpreter context.
impl<'tcx> EvalContextPrivExt<'tcx> for MiriInterpCx<'tcx> {}
676trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
    /// Run the active thread's `on_stack_empty` callback. The callback is temporarily
    /// moved out of the thread (leaving `None`) so it can mutate the interpreter state,
    /// then stored back afterwards.
    #[inline]
    fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
        let this = self.eval_context_mut();
        let active_thread = this.active_thread_mut();
        active_thread.origin_span = DUMMY_SP; // reset, the old value no longer applies
        let mut callback = active_thread
            .on_stack_empty
            .take()
            .expect("`on_stack_empty` not set up, or already running");
        let res = callback(this)?;
        this.active_thread_mut().on_stack_empty = Some(callback);
        interp_ok(res)
    }
690
691    /// Decide which action to take next and on which thread.
692    ///
693    /// The currently implemented scheduling policy is the one that is commonly
694    /// used in stateless model checkers such as Loom: run the active thread as
695    /// long as we can and switch only when we have to (the active thread was
696    /// blocked, terminated, or has explicitly asked to be preempted).
697    ///
698    /// If GenMC mode is active, the scheduling is instead handled by GenMC.
699    fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
700        let this = self.eval_context_mut();
701
702        // In GenMC mode, we let GenMC do the scheduling.
703        if this.machine.data_race.as_genmc_ref().is_some() {
704            loop {
705                let genmc_ctx = this.machine.data_race.as_genmc_ref().unwrap();
706                let Some(next_thread_id) = genmc_ctx.schedule_thread(this)? else {
707                    return interp_ok(SchedulingAction::ExecuteStep);
708                };
709                // If a thread is blocked on GenMC, we have to implicitly unblock it when it gets scheduled again.
710                if this.machine.threads.threads[next_thread_id]
711                    .state
712                    .is_blocked_on(&BlockReason::Genmc)
713                {
714                    info!(
715                        "GenMC: scheduling blocked thread {next_thread_id:?}, so we unblock it now."
716                    );
717                    this.unblock_thread(next_thread_id, BlockReason::Genmc)?;
718                }
719                // The thread we just unblocked may have been blocked again during the unblocking callback.
720                // In that case, we need to ask for a different thread to run next.
721                let thread_manager = &mut this.machine.threads;
722                if thread_manager.threads[next_thread_id].state.is_enabled() {
723                    // Set the new active thread.
724                    thread_manager.active_thread = next_thread_id;
725                    return interp_ok(SchedulingAction::ExecuteStep);
726                }
727            }
728        }
729
730        // We are not in GenMC mode, so we control the scheduling.
731        let thread_manager = &this.machine.threads;
732        // This thread and the program can keep going.
733        if thread_manager.threads[thread_manager.active_thread].state.is_enabled()
734            && !thread_manager.yield_active_thread
735        {
736            // The currently active thread is still enabled, just continue with it.
737            return interp_ok(SchedulingAction::ExecuteStep);
738        }
739
740        // The active thread yielded or got terminated. Let's see if there are any I/O events
741        // or timeouts to take care of.
742
743        if this.machine.communicate() {
744            // When isolation is disabled we need to check for events for
745            // threads which are blocked on host I/O.
746            // We do this before running any other threads such that the threads
747            // which received events are available for scheduling afterwards.
748
749            // Perform a non-blocking poll for newly available I/O events from the OS.
750            this.poll_and_unblock(Some(Duration::ZERO))?;
751        }
752
753        // We also check timeouts before running any other thread, to ensure that timeouts
754        // "in the past" fire before any other thread can take an action. This ensures that for
755        // `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
756        // abstime has already been passed at the time of the call".
757        // <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
758        let potential_sleep_time = this.unblock_expired_timeouts()?;
759
760        let thread_manager = &mut this.machine.threads;
761        let rng = this.machine.rng.get_mut();
762
763        // No callbacks immediately scheduled, pick a regular thread to execute.
764        // The active thread blocked or yielded. So we go search for another enabled thread.
765        // We build the list of threads by starting with the threads after the current one, followed by
766        // the threads before the current one and then the current thread itself (i.e., this iterator acts
767        // like `threads.rotate_left(self.active_thread.index() + 1)`. This ensures that if we pick the first
768        // eligible thread, we do regular round-robin scheduling, and all threads get a chance to take a step.
769        let mut threads_iter = thread_manager
770            .threads
771            .iter_enumerated()
772            .skip(thread_manager.active_thread.index() + 1)
773            .chain(
774                thread_manager
775                    .threads
776                    .iter_enumerated()
777                    .take(thread_manager.active_thread.index() + 1),
778            )
779            .filter(|(_id, thread)| thread.state.is_enabled());
780        // Pick a new thread, and switch to it.
781        let new_thread = if thread_manager.fixed_scheduling {
782            threads_iter.next()
783        } else {
784            threads_iter.choose(rng)
785        };
786
787        if let Some((id, _thread)) = new_thread {
788            if thread_manager.active_thread != id {
789                info!(
790                    "---------- Now executing on thread `{}` (previous: `{}`) ----------------------------------------",
791                    thread_manager.get_thread_display_name(id),
792                    thread_manager.get_thread_display_name(thread_manager.active_thread)
793                );
794                thread_manager.active_thread = id;
795            }
796        }
797        // This completes the `yield`, if any was requested.
798        thread_manager.yield_active_thread = false;
799
800        if thread_manager.threads[thread_manager.active_thread].state.is_enabled() {
801            return interp_ok(SchedulingAction::ExecuteStep);
802        }
803        // We have not found a thread to execute.
804        if thread_manager.threads.iter().all(|thread| thread.state.is_terminated()) {
805            unreachable!("all threads terminated without the main thread terminating?!");
806        } else if let Some(sleep_time) = potential_sleep_time {
807            // All threads are currently blocked, but we have unexecuted
808            // timeout_callbacks, which may unblock some of the threads. Hence,
809            // sleep until the first callback.
810            interp_ok(SchedulingAction::SleepAndWaitForIo(Some(sleep_time)))
811        } else if thread_manager
812            .threads
813            .iter()
814            .any(|thread| thread.state.is_blocked_on(&BlockReason::IO))
815        {
816            // At least one thread is blocked on host I/O but doesn't
817            // have a timeout set. Hence, we sleep indefinitely in the
818            // hope that eventually an I/O event for this thread happens.
819            interp_ok(SchedulingAction::SleepAndWaitForIo(None))
820        } else {
821            throw_machine_stop!(TerminationInfo::GlobalDeadlock);
822        }
823    }
824
825    /// Poll for I/O events until either an I/O event happened or the timeout expired.
826    /// The different timeout values are described in [`BlockingIoManager::poll`].
827    fn poll_and_unblock(&mut self, timeout: Option<Duration>) -> InterpResult<'tcx> {
828        let this = self.eval_context_mut();
829
830        let ready = match this.machine.blocking_io.poll(timeout) {
831            Ok(ready) => ready,
832            // We can ignore errors originating from interrupts; that's just a spurious wakeup.
833            Err(e) if e.kind() == io::ErrorKind::Interrupted => return interp_ok(()),
834            // For other errors we panic. On Linux and BSD hosts this should only be
835            // reachable when a system resource error (e.g. ENOMEM or ENOSPC) occurred.
836            Err(e) => panic!("unexpected error while polling: {e}"),
837        };
838
839        ready.into_iter().try_for_each(|(receiver, _source)| {
840            match receiver {
841                InterestReceiver::UnblockThread(thread_id) =>
842                    this.unblock_thread(thread_id, BlockReason::IO),
843            }
844        })
845    }
846
847    /// Find all threads with expired timeouts, unblock them and execute their timeout callbacks.
848    ///
849    /// This method returns the minimum duration until the next thread timeout expires.
850    /// If all ready threads have no timeout set, [`None`] is returned.
851    fn unblock_expired_timeouts(&mut self) -> InterpResult<'tcx, Option<Duration>> {
852        let this = self.eval_context_mut();
853        let clock = &this.machine.monotonic_clock;
854
855        let mut min_wait_time = Option::<Duration>::None;
856        let mut callbacks = Vec::new();
857
858        for (id, thread) in this.machine.threads.threads.iter_enumerated_mut() {
859            match &thread.state {
860                ThreadState::Blocked { timeout: Some(timeout), .. } => {
861                    let wait_time = timeout.get_wait_time(clock);
862                    if wait_time.is_zero() {
863                        // The timeout expired for this thread.
864                        let old_state = mem::replace(&mut thread.state, ThreadState::Enabled);
865                        let ThreadState::Blocked { callback, .. } = old_state else {
866                            unreachable!()
867                        };
868                        // Add callback to list to be run after this loop because of borrow-checking.
869                        callbacks.push((id, callback));
870                    } else {
871                        // Update `min_wait_time` to contain the smallest duration until
872                        // the next timeout expires.
873                        min_wait_time = Some(wait_time.min(min_wait_time.unwrap_or(Duration::MAX)));
874                    }
875                }
876                _ => {}
877            }
878        }
879
880        for (thread, callback) in callbacks {
881            // This back-and-forth with `set_active_thread` is here because of two
882            // design decisions:
883            // 1. Make the caller and not the callback responsible for changing
884            //    thread.
885            // 2. Make the scheduler the only place that can change the active
886            //    thread.
887            let old_thread = this.machine.threads.set_active_thread_id(thread);
888            callback.call(this, UnblockKind::TimedOut)?;
889            this.machine.threads.set_active_thread_id(old_thread);
890        }
891
892        interp_ok(min_wait_time)
893    }
894}
895
// Public interface to thread management.
// This impl is intentionally empty: all functionality lives in the trait's default
// methods, and this line merely makes them callable on the Miri interpreter context.
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
898pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
899    #[inline]
900    fn thread_id_try_from(&self, id: impl TryInto<u32>) -> Result<ThreadId, ThreadLookupError> {
901        self.eval_context_ref().machine.threads.thread_id_try_from(id)
902    }
903
904    /// Get a thread-specific allocation id for the given thread-local static.
905    /// If needed, allocate a new one.
906    fn get_or_create_thread_local_alloc(
907        &mut self,
908        def_id: DefId,
909    ) -> InterpResult<'tcx, StrictPointer> {
910        let this = self.eval_context_mut();
911        let tcx = this.tcx;
912        if let Some(old_alloc) = this.machine.threads.get_thread_local_alloc_id(def_id) {
913            // We already have a thread-specific allocation id for this
914            // thread-local static.
915            interp_ok(old_alloc)
916        } else {
917            // We need to allocate a thread-specific allocation id for this
918            // thread-local static.
919            // First, we compute the initial value for this static.
920            if tcx.is_foreign_item(def_id) {
921                throw_unsup_format!("foreign thread-local statics are not supported");
922            }
923            let params = this.machine.get_default_alloc_params();
924            let alloc = this.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
925            // We make a full copy of this allocation.
926            let mut alloc = alloc.inner().adjust_from_tcx(
927                &this.tcx,
928                |bytes, align| {
929                    interp_ok(MiriAllocBytes::from_bytes(
930                        std::borrow::Cow::Borrowed(bytes),
931                        align,
932                        params,
933                    ))
934                },
935                |ptr| this.global_root_pointer(ptr),
936            )?;
937            // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
938            alloc.mutability = Mutability::Mut;
939            // Create a fresh allocation with this content.
940            let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
941            this.machine.threads.set_thread_local_alloc(def_id, ptr);
942            interp_ok(ptr)
943        }
944    }
945
    /// Start a regular (non-main) thread.
    ///
    /// Creates the thread, registers it with the data-race / GenMC machinery, writes the
    /// new thread id into `thread` (if provided), and pushes the first stack frame that
    /// calls `start_routine` with `func_arg` on the new thread. Returns the new thread's id.
    #[inline]
    fn start_regular_thread(
        &mut self,
        thread: Option<MPlaceTy<'tcx>>,
        start_routine: Pointer,
        start_abi: ExternAbi,
        func_arg: ImmTy<'tcx>,
        ret_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, ThreadId> {
        let this = self.eval_context_mut();

        // Create the new thread. Its "on stack empty" hook drives the TLS-destructor
        // state machine once the thread's stack has emptied out.
        let current_span = this.machine.current_user_relevant_span();
        let new_thread_id = this.machine.threads.create_thread({
            let mut state = tls::TlsDtorsState::default();
            Box::new(move |m| state.on_stack_empty(m))
        });
        // Let the data-race detector (or GenMC) know about the new thread.
        match &mut this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.thread_created(&this.machine.threads, new_thread_id, current_span),
            GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                genmc_ctx.handle_thread_create(
                    &this.machine.threads,
                    start_routine,
                    &func_arg,
                    new_thread_id,
                )?,
        }
        // Write the current thread-id, switch to the next thread later
        // to treat this write operation as occurring on the current thread.
        if let Some(thread_info_place) = thread {
            this.write_scalar(
                Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
                &thread_info_place,
            )?;
        }

        // Finally switch to new thread so that we can push the first stackframe.
        // After this all accesses will be treated as occurring in the new thread.
        let old_thread_id = this.machine.threads.set_active_thread_id(new_thread_id);

        // The child inherits its parent's cpu affinity.
        if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&old_thread_id).cloned() {
            this.machine.thread_cpu_affinity.insert(new_thread_id, cpuset);
        }

        // Perform the function pointer load in the new thread frame.
        let instance = this.get_ptr_fn(start_routine)?.as_instance()?;

        // Note: the returned value is currently ignored (see the FIXME in
        // pthread_join in shims/unix/thread.rs) because the Rust standard library does not use
        // it.
        let ret_place = this.allocate(ret_layout, MiriMemoryKind::Machine.into())?;

        this.call_thread_root_function(
            instance,
            start_abi,
            &[func_arg],
            Some(&ret_place),
            current_span,
        )?;

        // Restore the old active thread frame.
        this.machine.threads.set_active_thread_id(old_thread_id);

        interp_ok(new_thread_id)
    }
1015
    /// Handles thread termination of the active thread: wakes up threads joining on this one,
    /// and deals with the thread's thread-local statics according to `tls_alloc_action`.
    ///
    /// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
    fn terminate_active_thread(&mut self, tls_alloc_action: TlsAllocAction) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        // Mark thread as terminated.
        let thread = this.active_thread_mut();
        assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
        thread.state = ThreadState::Terminated;

        // Deallocate TLS.
        let gone_thread = this.active_thread();
        {
            // Collect this thread's TLS allocations while removing them from the map.
            let mut free_tls_statics = Vec::new();
            this.machine.threads.thread_local_allocs.retain(|&(_def_id, thread), &mut alloc_id| {
                if thread != gone_thread {
                    // A different thread, keep this static around.
                    return true;
                }
                // Delete this static from the map and from memory.
                // We cannot free directly here as we cannot use `?` in this context.
                free_tls_statics.push(alloc_id);
                false
            });
            // Now free the TLS statics.
            for ptr in free_tls_statics {
                match tls_alloc_action {
                    TlsAllocAction::Deallocate =>
                        this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?,
                    TlsAllocAction::Leak =>
                        // Instead of deallocating, register the allocation as a static root
                        // so memory reachable from it counts as "allowed to leak".
                        if let Some(alloc) = ptr.provenance.get_alloc_id() {
                            trace!(
                                "Thread-local static leaked and stored as static root: {:?}",
                                alloc
                            );
                            this.machine.static_roots.push(alloc);
                        },
                }
            }
        }

        // Notify the data-race detector / GenMC of the termination.
        match &mut this.machine.data_race {
            GlobalDataRaceHandler::None => {}
            GlobalDataRaceHandler::Vclocks(data_race) =>
                data_race.thread_terminated(&this.machine.threads),
            GlobalDataRaceHandler::Genmc(genmc_ctx) => {
                // Inform GenMC that the thread finished.
                // This needs to happen once all accesses to the thread are done, including freeing any TLS statics.
                genmc_ctx.handle_thread_finish(&this.machine.threads)
            }
        }

        // Unblock joining threads.
        let unblock_reason = BlockReason::Join(gone_thread);
        let threads = &this.machine.threads.threads;
        let joining_threads = threads
            .iter_enumerated()
            .filter(|(_, thread)| thread.state.is_blocked_on(&unblock_reason))
            .map(|(id, _)| id)
            .collect::<Vec<_>>();
        for thread in joining_threads {
            this.unblock_thread(thread, unblock_reason.clone())?;
        }

        interp_ok(())
    }
1084
1085    /// Block the current thread, with an optional timeout.
1086    /// The callback will be invoked when the thread gets unblocked.
1087    #[inline]
1088    fn block_thread(
1089        &mut self,
1090        reason: BlockReason,
1091        timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
1092        callback: DynUnblockCallback<'tcx>,
1093    ) {
1094        let this = self.eval_context_mut();
1095        if timeout.is_some() && this.machine.data_race.as_genmc_ref().is_some() {
1096            panic!("Unimplemented: Timeouts not yet supported in GenMC mode.");
1097        }
1098        let timeout = timeout.map(|(clock, anchor, duration)| {
1099            let anchor = match clock {
1100                TimeoutClock::RealTime => {
1101                    assert!(
1102                        this.machine.communicate(),
1103                        "cannot have `RealTime` timeout with isolation enabled!"
1104                    );
1105                    Timeout::RealTime(match anchor {
1106                        TimeoutAnchor::Absolute => SystemTime::UNIX_EPOCH,
1107                        TimeoutAnchor::Relative => SystemTime::now(),
1108                    })
1109                }
1110                TimeoutClock::Monotonic =>
1111                    Timeout::Monotonic(match anchor {
1112                        TimeoutAnchor::Absolute => this.machine.monotonic_clock.epoch(),
1113                        TimeoutAnchor::Relative => this.machine.monotonic_clock.now(),
1114                    }),
1115            };
1116            anchor.add_lossy(duration)
1117        });
1118        this.machine.threads.block_thread(reason, timeout, callback);
1119    }
1120
1121    /// Put the blocked thread into the enabled state.
1122    /// Sanity-checks that the thread previously was blocked for the right reason.
1123    fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) -> InterpResult<'tcx> {
1124        let this = self.eval_context_mut();
1125        let old_state =
1126            mem::replace(&mut this.machine.threads.threads[thread].state, ThreadState::Enabled);
1127        let callback = match old_state {
1128            ThreadState::Blocked { reason: actual_reason, callback, .. } => {
1129                assert_eq!(
1130                    reason, actual_reason,
1131                    "unblock_thread: thread was blocked for the wrong reason"
1132                );
1133                callback
1134            }
1135            _ => panic!("unblock_thread: thread was not blocked"),
1136        };
1137        // The callback must be executed in the previously blocked thread.
1138        let old_thread = this.machine.threads.set_active_thread_id(thread);
1139        callback.call(this, UnblockKind::Ready)?;
1140        this.machine.threads.set_active_thread_id(old_thread);
1141        interp_ok(())
1142    }
1143
1144    #[inline]
1145    fn detach_thread(
1146        &mut self,
1147        thread_id: ThreadId,
1148        allow_terminated_joined: bool,
1149    ) -> InterpResult<'tcx> {
1150        let this = self.eval_context_mut();
1151        this.machine.threads.detach_thread(thread_id, allow_terminated_joined)
1152    }
1153
    /// Mark that the active thread tries to join the thread with `joined_thread_id`.
    ///
    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
    fn join_thread(
        &mut self,
        joined_thread_id: ThreadId,
        success_retval: Scalar,
        return_dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let thread_mgr = &mut this.machine.threads;
        if thread_mgr.threads[joined_thread_id].join_status == ThreadJoinStatus::Detached {
            // On Windows this corresponds to joining on a closed handle.
            throw_ub_format!("trying to join a detached thread");
        }

        // Shared tail of both the "already terminated" and the "unblocked later" paths:
        // perform the join synchronization and write the return value.
        fn after_join<'tcx>(
            this: &mut InterpCx<'tcx, MiriMachine<'tcx>>,
            joined_thread_id: ThreadId,
            success_retval: Scalar,
            return_dest: &MPlaceTy<'tcx>,
        ) -> InterpResult<'tcx> {
            let threads = &this.machine.threads;
            match &mut this.machine.data_race {
                GlobalDataRaceHandler::None => {}
                GlobalDataRaceHandler::Vclocks(data_race) =>
                    data_race.thread_joined(threads, joined_thread_id),
                GlobalDataRaceHandler::Genmc(genmc_ctx) =>
                    genmc_ctx.handle_thread_join(threads.active_thread, joined_thread_id)?,
            }
            this.write_scalar(success_retval, return_dest)?;
            interp_ok(())
        }

        // Mark the joined thread as being joined so that we detect if other
        // threads try to join it.
        thread_mgr.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
        if !thread_mgr.threads[joined_thread_id].state.is_terminated() {
            trace!(
                "{:?} blocked on {:?} when trying to join",
                thread_mgr.active_thread, joined_thread_id
            );
            // Inform GenMC before blocking (it is informed again via `after_join` once
            // the join completes).
            if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() {
                genmc_ctx.handle_thread_join(thread_mgr.active_thread, joined_thread_id)?;
            }

            // The joined thread is still running, we need to wait for it.
            // Once we get unblocked, perform the appropriate synchronization and write the return value.
            let dest = return_dest.clone();
            thread_mgr.block_thread(
                BlockReason::Join(joined_thread_id),
                None,
                callback!(
                    @capture<'tcx> {
                        joined_thread_id: ThreadId,
                        dest: MPlaceTy<'tcx>,
                        success_retval: Scalar,
                    }
                    |this, unblock: UnblockKind| {
                        // Join blocking has no timeout, so the only way out is `Ready`.
                        assert_eq!(unblock, UnblockKind::Ready);
                        after_join(this, joined_thread_id, success_retval, &dest)
                    }
                ),
            );
        } else {
            // The thread has already terminated - establish happens-before and write the return value.
            after_join(this, joined_thread_id, success_retval, return_dest)?;
        }
        interp_ok(())
    }
1224
1225    /// Mark that the active thread tries to exclusively join the thread with `joined_thread_id`.
1226    /// If the thread is already joined by another thread, it will throw UB.
1227    ///
1228    /// When the join is successful (immediately, or as soon as the joined thread finishes), `success_retval` will be written to `return_dest`.
1229    fn join_thread_exclusive(
1230        &mut self,
1231        joined_thread_id: ThreadId,
1232        success_retval: Scalar,
1233        return_dest: &MPlaceTy<'tcx>,
1234    ) -> InterpResult<'tcx> {
1235        let this = self.eval_context_mut();
1236        let threads = &this.machine.threads.threads;
1237        if threads[joined_thread_id].join_status == ThreadJoinStatus::Joined {
1238            throw_ub_format!("trying to join an already joined thread");
1239        }
1240
1241        if joined_thread_id == this.machine.threads.active_thread {
1242            throw_ub_format!("trying to join itself");
1243        }
1244
1245        // Sanity check `join_status`.
1246        assert!(
1247            threads.iter().all(|thread| {
1248                !thread.state.is_blocked_on(&BlockReason::Join(joined_thread_id))
1249            }),
1250            "this thread already has threads waiting for its termination"
1251        );
1252
1253        this.join_thread(joined_thread_id, success_retval, return_dest)
1254    }
1255
1256    #[inline]
1257    fn active_thread(&self) -> ThreadId {
1258        let this = self.eval_context_ref();
1259        this.machine.threads.active_thread()
1260    }
1261
1262    #[inline]
1263    fn active_thread_mut(&mut self) -> &mut Thread<'tcx> {
1264        let this = self.eval_context_mut();
1265        this.machine.threads.active_thread_mut()
1266    }
1267
1268    #[inline]
1269    fn active_thread_ref(&self) -> &Thread<'tcx> {
1270        let this = self.eval_context_ref();
1271        this.machine.threads.active_thread_ref()
1272    }
1273
1274    #[inline]
1275    fn get_total_thread_count(&self) -> usize {
1276        let this = self.eval_context_ref();
1277        this.machine.threads.get_total_thread_count()
1278    }
1279
1280    #[inline]
1281    fn have_all_terminated(&self) -> bool {
1282        let this = self.eval_context_ref();
1283        this.machine.threads.have_all_terminated()
1284    }
1285
1286    #[inline]
1287    fn enable_thread(&mut self, thread_id: ThreadId) {
1288        let this = self.eval_context_mut();
1289        this.machine.threads.enable_thread(thread_id);
1290    }
1291
1292    #[inline]
1293    fn active_thread_stack<'a>(&'a self) -> &'a [Frame<'tcx, Provenance, FrameExtra<'tcx>>] {
1294        let this = self.eval_context_ref();
1295        this.machine.threads.active_thread_stack()
1296    }
1297
1298    #[inline]
1299    fn active_thread_stack_mut<'a>(
1300        &'a mut self,
1301    ) -> &'a mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
1302        let this = self.eval_context_mut();
1303        this.machine.threads.active_thread_stack_mut()
1304    }
1305
1306    /// Set the name of the current thread. The buffer must not include the null terminator.
1307    #[inline]
1308    fn set_thread_name(&mut self, thread: ThreadId, new_thread_name: Vec<u8>) {
1309        self.eval_context_mut().machine.threads.set_thread_name(thread, new_thread_name);
1310    }
1311
1312    #[inline]
1313    fn get_thread_name<'c>(&'c self, thread: ThreadId) -> Option<&'c [u8]>
1314    where
1315        'tcx: 'c,
1316    {
1317        self.eval_context_ref().machine.threads.get_thread_name(thread)
1318    }
1319
1320    #[inline]
1321    fn yield_active_thread(&mut self) {
1322        self.eval_context_mut().machine.threads.yield_active_thread();
1323    }
1324
1325    #[inline]
1326    fn maybe_preempt_active_thread(&mut self) {
1327        use rand::Rng as _;
1328
1329        let this = self.eval_context_mut();
1330        if !this.machine.threads.fixed_scheduling
1331            && this.machine.rng.get_mut().random_bool(this.machine.preemption_rate)
1332        {
1333            this.yield_active_thread();
1334        }
1335    }
1336
    /// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
    /// termination).
    fn run_threads(&mut self) -> InterpResult<'tcx, !> {
        let this = self.eval_context_mut();
        loop {
            // Honor Ctrl-C: report abnormal termination and stop the machine.
            if CTRL_C_RECEIVED.load(Relaxed) {
                this.machine.handle_abnormal_termination();
                throw_machine_stop!(TerminationInfo::Interrupted);
            }
            match this.schedule()? {
                SchedulingAction::ExecuteStep => {
                    // `step` returns `false` when the active thread's stack is empty.
                    if !this.step()? {
                        // See if this thread can do something else.
                        match this.run_on_stack_empty()? {
                            Poll::Pending => {} // keep going
                            Poll::Ready(()) =>
                                this.terminate_active_thread(TlsAllocAction::Deallocate)?,
                        }
                    }
                }
                SchedulingAction::SleepAndWaitForIo(duration) => {
                    if this.machine.communicate() {
                        // When we're running with isolation disabled, instead of
                        // strictly sleeping the duration we allow waking up
                        // early for I/O events from the OS.

                        this.poll_and_unblock(duration)?;
                    } else {
                        // Under isolation there are no host I/O events to wait for, so we
                        // sleep the full duration on the machine's monotonic clock.
                        let duration = duration.expect(
                            "Infinite sleep should not be triggered when isolation is enabled",
                        );
                        this.machine.monotonic_clock.sleep(duration);
                    }
                }
            }
        }
    }
1374}