tokio/runtime/scheduler/multi_thread/worker.rs

//! A scheduler is initialized with a fixed number of workers. Each worker is
//! driven by a thread. Each worker has a "core" which contains data such as the
//! run queue and other state. When `block_in_place` is called, the worker's
//! "core" is handed off to a new thread allowing the scheduler to continue to
//! make progress while the originating thread blocks.
//!
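//! A hedged sketch of how that fixed number of workers is chosen, using the
//! public builder API (which lives outside this module); the `4` is
//! illustrative:
//!
//! ```ignore
//! let rt = tokio::runtime::Builder::new_multi_thread()
//!     .worker_threads(4) // four `Worker`s, each driving its own `Core`
//!     .enable_all()
//!     .build()
//!     .unwrap();
//! ```
//!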
//! # Shutdown
//!
//! Shutting down the runtime involves the following steps:
//!
//!  1. The `Shared::close` method is called. This closes the inject queue and
//!     `OwnedTasks` instance and wakes up all worker threads.
//!
//!  2. Each worker thread observes the close signal next time it runs
//!     `Core::maintenance` by checking whether the inject queue is closed.
//!     The `Core::is_shutdown` flag is set to true.
//!
//!  3. The worker threads call `pre_shutdown` in parallel. Here, each worker
//!     will keep removing tasks from `OwnedTasks` until it is empty. No new
//!     tasks can be pushed to the `OwnedTasks` during or after this step as it
//!     was closed in step 1.
//!
//!  4. The workers call `Shared::shutdown` to enter the single-threaded phase of
//!     shutdown. These calls will push their core to `Shared::shutdown_cores`,
//!     and the last thread to push its core will finish the shutdown procedure.
//!
//!  5. The local run queue of each core is emptied, then the inject queue is
//!     emptied.
//!
//! At this point, shutdown has completed. It is not possible for any of the
//! collections to contain any tasks at this point, as each collection was
//! closed first, then emptied afterwards.
//!
//! ## Spawns during shutdown
//!
//! When spawning tasks during shutdown, there are two cases:
//!
//!  * The spawner observes the `OwnedTasks` being open and the inject queue
//!    being closed.
//!  * The spawner observes the `OwnedTasks` being closed and doesn't check the
//!    inject queue.
//!
//! The first case can only happen if the `OwnedTasks::bind` call happens before
//! or during step 1 of shutdown. In this case, the runtime will clean up the
//! task in step 3 of shutdown.
//!
//! In the latter case, the task is never spawned and is immediately cancelled
//! by the spawner.
//!
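//! A hedged illustration of the latter case through the public API: spawning
//! via a retained `Handle` after the runtime has shut down should produce a
//! task that is never polled, with its `JoinHandle` reporting cancellation
//! when awaited.
//!
//! ```ignore
//! let rt = tokio::runtime::Runtime::new().unwrap();
//! let handle = rt.handle().clone();
//! drop(rt); // runs the shutdown steps described above
//!
//! // The spawner observes the closed `OwnedTasks`; the future is dropped
//! // without being polled and `join` resolves to a cancellation error.
//! let join = handle.spawn(async {});
//! ```
//!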
//! The correctness of shutdown requires both the inject queue and `OwnedTasks`
//! collection to have a closed bit. With a close bit on only the inject queue,
//! spawning could run into a situation where a task is successfully bound long
//! after the runtime has shut down. With a close bit on only the `OwnedTasks`,
//! the first spawning situation could result in the notification being pushed
//! to the inject queue after step 5 of shutdown, which would leave a task in
//! the inject queue indefinitely. This would be a ref-count cycle and a memory
//! leak.

use crate::loom::sync::{Arc, Mutex};
use crate::runtime;
use crate::runtime::scheduler::multi_thread::{
    idle, queue, Counters, Handle, Idle, Overflow, Parker, Stats, TraceStatus, Unparker,
};
use crate::runtime::scheduler::{inject, Defer, Lock};
use crate::runtime::task::OwnedTasks;
use crate::runtime::{blocking, driver, scheduler, task, Config, SchedulerMetrics, WorkerMetrics};
use crate::runtime::{context, TaskHooks};
use crate::task::coop;
use crate::util::atomic_cell::AtomicCell;
use crate::util::rand::{FastRand, RngSeedGenerator};

use std::cell::RefCell;
use std::task::Waker;
use std::thread;
use std::time::Duration;

mod metrics;

cfg_taskdump! {
    mod taskdump;
}

cfg_not_taskdump! {
    mod taskdump_mock;
}

/// A scheduler worker
pub(super) struct Worker {
    /// Reference to the scheduler's handle
    handle: Arc<Handle>,

    /// Index holding this worker's remote state
    index: usize,

    /// Used to hand off a worker's core to another thread.
    core: AtomicCell<Core>,
}

/// Core data
struct Core {
    /// Used to schedule bookkeeping tasks every so often.
    tick: u32,

    /// When a task is scheduled from a worker, it is stored in this slot. The
    /// worker will check this slot for a task **before** checking the run
    /// queue. This effectively results in the **last** scheduled task being run
    /// next (LIFO). This is an optimization for improving locality, which
    /// benefits message passing patterns and helps to reduce latency.
    lifo_slot: Option<Notified>,

    /// When `true`, locally scheduled tasks go to the LIFO slot. When `false`,
    /// they go to the back of the `run_queue`.
    lifo_enabled: bool,

    /// The worker-local run queue.
    run_queue: queue::Local<Arc<Handle>>,

    /// True if the worker is currently searching for more work. Searching
    /// involves attempting to steal from other workers.
    is_searching: bool,

    /// True if the scheduler is being shut down
    is_shutdown: bool,

    /// True if the scheduler is being traced
    is_traced: bool,

    /// Parker
    ///
    /// Stored in an `Option` as the parker is added / removed to make the
    /// borrow checker happy.
    park: Option<Parker>,

    /// Per-worker runtime stats
    stats: Stats,

    /// How often to check the global queue
    global_queue_interval: u32,

    /// Fast random number generator.
    rand: FastRand,
}

/// State shared across all workers
pub(crate) struct Shared {
    /// Per-worker remote state. All other workers have access to this, and it
    /// is how they communicate with one another.
    remotes: Box<[Remote]>,

    /// Global task queue used for:
    ///  1. Submitting work to the scheduler while **not** currently on a worker thread.
    ///  2. Submitting work to the scheduler when a worker run queue is saturated.
    pub(super) inject: inject::Shared<Arc<Handle>>,

    /// Coordinates idle workers
    idle: Idle,

    /// Collection of all active tasks spawned onto this executor.
    pub(crate) owned: OwnedTasks<Arc<Handle>>,

    /// Data synchronized by the scheduler mutex
    pub(super) synced: Mutex<Synced>,

    /// Cores that have observed the shutdown signal
    ///
    /// The core is **not** placed back in the worker to prevent it from being
    /// stolen by a thread that was spawned as part of `block_in_place`.
    #[allow(clippy::vec_box)] // we're moving an already-boxed value
    shutdown_cores: Mutex<Vec<Box<Core>>>,

    /// The number of cores that have observed the trace signal.
    pub(super) trace_status: TraceStatus,

    /// Scheduler configuration options
    config: Config,

    /// Collects metrics from the runtime.
    pub(super) scheduler_metrics: SchedulerMetrics,

    pub(super) worker_metrics: Box<[WorkerMetrics]>,

    /// Only held to trigger some code on drop. This is used to get internal
    /// runtime metrics that can be useful when doing performance
    /// investigations. This does nothing (empty struct, no drop impl) unless
    /// the `tokio_internal_mt_counters` `cfg` flag is set.
    _counters: Counters,
}

/// Data synchronized by the scheduler mutex
pub(crate) struct Synced {
    /// Synchronized state for `Idle`.
    pub(super) idle: idle::Synced,

    /// Synchronized state for `Inject`.
    pub(crate) inject: inject::Synced,
}

/// Used to communicate with a worker from other threads.
struct Remote {
    /// Steals tasks from this worker.
    pub(super) steal: queue::Steal<Arc<Handle>>,

    /// Unparks the associated worker thread
    unpark: Unparker,
}

/// Thread-local context
pub(crate) struct Context {
    /// Worker
    worker: Arc<Worker>,

    /// Core data
    core: RefCell<Option<Box<Core>>>,

    /// Tasks to wake after resource drivers are polled. This is mostly to
    /// handle yielded tasks.
    pub(crate) defer: Defer,
}

/// Starts the workers
pub(crate) struct Launch(Vec<Arc<Worker>>);

/// Running a task may consume the core. If the core is still available when
/// the task finishes running, it is returned. Otherwise, the worker will need
/// to stop processing.
type RunResult = Result<Box<Core>, ()>;

/// A notified task handle
type Notified = task::Notified<Arc<Handle>>;

/// A value picked out of thin air. Running the LIFO slot a handful of times
/// seems sufficient to benefit from locality; more than 3 times is probably
/// excessive. The value can be tuned in the future with data that shows
/// improvements.
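///
/// A hedged sketch of the "ping-pong" workload this cap guards against, using
/// public channel APIs (the names are illustrative): each message wakes the
/// peer task, which lands in the LIFO slot and immediately wakes the sender
/// back.
///
/// ```ignore
/// let (tx_a, mut rx_a) = tokio::sync::mpsc::channel::<u32>(1);
/// let (tx_b, mut rx_b) = tokio::sync::mpsc::channel::<u32>(1);
///
/// // Task A: each message from B wakes A, and A immediately wakes B again.
/// tokio::spawn(async move {
///     while let Some(n) = rx_a.recv().await {
///         let _ = tx_b.send(n + 1).await;
///     }
/// });
///
/// // Task B mirrors task A. Once a first message is sent on `tx_a`, the
/// // pair would bounce through the LIFO slot indefinitely without this cap.
/// tokio::spawn(async move {
///     while let Some(n) = rx_b.recv().await {
///         let _ = tx_a.send(n + 1).await;
///     }
/// });
/// ```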
const MAX_LIFO_POLLS_PER_TICK: usize = 3;

pub(super) fn create(
    size: usize,
    park: Parker,
    driver_handle: driver::Handle,
    blocking_spawner: blocking::Spawner,
    seed_generator: RngSeedGenerator,
    config: Config,
) -> (Arc<Handle>, Launch) {
    let mut cores = Vec::with_capacity(size);
    let mut remotes = Vec::with_capacity(size);
    let mut worker_metrics = Vec::with_capacity(size);

    // Create the local queues
    for _ in 0..size {
        let (steal, run_queue) = queue::local();

        let park = park.clone();
        let unpark = park.unpark();
        let metrics = WorkerMetrics::from_config(&config);
        let stats = Stats::new(&metrics);

        cores.push(Box::new(Core {
            tick: 0,
            lifo_slot: None,
            lifo_enabled: !config.disable_lifo_slot,
            run_queue,
            is_searching: false,
            is_shutdown: false,
            is_traced: false,
            park: Some(park),
            global_queue_interval: stats.tuned_global_queue_interval(&config),
            stats,
            rand: FastRand::from_seed(config.seed_generator.next_seed()),
        }));

        remotes.push(Remote { steal, unpark });
        worker_metrics.push(metrics);
    }

    let (idle, idle_synced) = Idle::new(size);
    let (inject, inject_synced) = inject::Shared::new();

    let remotes_len = remotes.len();
    let handle = Arc::new(Handle {
        task_hooks: TaskHooks::from_config(&config),
        shared: Shared {
            remotes: remotes.into_boxed_slice(),
            inject,
            idle,
            owned: OwnedTasks::new(size),
            synced: Mutex::new(Synced {
                idle: idle_synced,
                inject: inject_synced,
            }),
            shutdown_cores: Mutex::new(vec![]),
            trace_status: TraceStatus::new(remotes_len),
            config,
            scheduler_metrics: SchedulerMetrics::new(),
            worker_metrics: worker_metrics.into_boxed_slice(),
            _counters: Counters,
        },
        driver: driver_handle,
        blocking_spawner,
        seed_generator,
    });

    let mut launch = Launch(vec![]);

    for (index, core) in cores.drain(..).enumerate() {
        launch.0.push(Arc::new(Worker {
            handle: handle.clone(),
            index,
            core: AtomicCell::new(Some(core)),
        }));
    }

    (handle, launch)
}

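/// Blocks in place, handing this worker's core off to another thread so the
/// scheduler keeps making progress while the closure runs.
///
/// A hedged usage sketch via the public wrapper `tokio::task::block_in_place`
/// (multi-threaded runtime only; the file name is illustrative):
///
/// ```ignore
/// #[tokio::main(flavor = "multi_thread")]
/// async fn main() {
///     let contents = tokio::task::block_in_place(|| {
///         // Blocking here is fine: this worker's core has been handed to
///         // another thread, so other tasks keep running.
///         std::fs::read_to_string("config.toml")
///     });
///     let _ = contents;
/// }
/// ```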
#[track_caller]
pub(crate) fn block_in_place<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Try to steal the worker core back
    struct Reset {
        take_core: bool,
        budget: coop::Budget,
    }

    impl Drop for Reset {
        fn drop(&mut self) {
            with_current(|maybe_cx| {
                if let Some(cx) = maybe_cx {
                    if self.take_core {
                        let core = cx.worker.core.take();

                        if core.is_some() {
                            cx.worker.handle.shared.worker_metrics[cx.worker.index]
                                .set_thread_id(thread::current().id());
                        }

                        let mut cx_core = cx.core.borrow_mut();
                        assert!(cx_core.is_none());
                        *cx_core = core;
                    }

                    // Reset the task budget as we are re-entering the
                    // runtime.
                    coop::set(self.budget);
                }
            });
        }
    }

    let mut had_entered = false;
    let mut take_core = false;

    let setup_result = with_current(|maybe_cx| {
        match (
            crate::runtime::context::current_enter_context(),
            maybe_cx.is_some(),
        ) {
            (context::EnterRuntime::Entered { .. }, true) => {
                // We are on a thread pool runtime thread, so we just need to
                // set up blocking.
                had_entered = true;
            }
            (
                context::EnterRuntime::Entered {
                    allow_block_in_place,
                },
                false,
            ) => {
                // We are on an executor, but _not_ on the thread pool. That is
                // _only_ okay if we are in a thread pool runtime's block_on
                // method:
                if allow_block_in_place {
                    had_entered = true;
                    return Ok(());
                } else {
                    // This probably means we are on the current_thread runtime or in a
                    // LocalSet, where it is _not_ okay to block.
                    return Err(
                        "can call blocking only when running on the multi-threaded runtime",
                    );
                }
            }
            (context::EnterRuntime::NotEntered, true) => {
                // This is a nested call to block_in_place (we already exited).
                // All the necessary setup has already been done.
                return Ok(());
            }
            (context::EnterRuntime::NotEntered, false) => {
                // We are outside of the tokio runtime, so blocking is fine.
                // We can also skip all of the thread pool blocking setup steps.
                return Ok(());
            }
        }

        let cx = maybe_cx.expect("no .is_some() == false cases above should lead here");

        // Get the worker core. If none is set, then blocking is fine!
        let mut core = match cx.core.borrow_mut().take() {
            Some(core) => core,
            None => return Ok(()),
        };

        // If we heavily call `spawn_blocking`, there might be no available thread to
        // run this core. Except for the task in the lifo_slot, all tasks can be
        // stolen, so we move the task out of the lifo_slot to the run_queue.
        if let Some(task) = core.lifo_slot.take() {
            core.run_queue
                .push_back_or_overflow(task, &*cx.worker.handle, &mut core.stats);
        }

        // We are taking the core from the context and sending it to another
        // thread.
        take_core = true;

        // The parker should be set here
        assert!(core.park.is_some());

        // In order to block, the core must be sent to another thread for
        // execution.
        //
        // First, move the core back into the worker's shared core slot.
        cx.worker.core.set(core);

        // Next, clone the worker handle and send it to a new thread for
        // processing.
        //
        // Once the blocking task is done executing, we will attempt to
        // steal the core back.
        let worker = cx.worker.clone();
        runtime::spawn_blocking(move || run(worker));
        Ok(())
    });

    if let Err(panic_message) = setup_result {
        panic!("{}", panic_message);
    }

    if had_entered {
        // Unset the current task's budget. Blocking sections are not
        // constrained by task budgets.
        let _reset = Reset {
            take_core,
            budget: coop::stop(),
        };

        crate::runtime::context::exit_runtime(f)
    } else {
        f()
    }
}

impl Launch {
    pub(crate) fn launch(mut self) {
        for worker in self.0.drain(..) {
            runtime::spawn_blocking(move || run(worker));
        }
    }
}

fn run(worker: Arc<Worker>) {
    #[allow(dead_code)]
    struct AbortOnPanic;

    impl Drop for AbortOnPanic {
        fn drop(&mut self) {
            if std::thread::panicking() {
                eprintln!("worker thread panicking; aborting process");
                std::process::abort();
            }
        }
    }

    // Catching panics on worker threads in tests is quite tricky. Instead, when
    // debug assertions are enabled, we just abort the process.
    #[cfg(debug_assertions)]
    let _abort_on_panic = AbortOnPanic;

    // Acquire a core. If this fails, then another thread is running this
    // worker and there is nothing further to do.
    let core = match worker.core.take() {
        Some(core) => core,
        None => return,
    };

    worker.handle.shared.worker_metrics[worker.index].set_thread_id(thread::current().id());

    let handle = scheduler::Handle::MultiThread(worker.handle.clone());

    crate::runtime::context::enter_runtime(&handle, true, |_| {
        // Set the worker context.
        let cx = scheduler::Context::MultiThread(Context {
            worker,
            core: RefCell::new(None),
            defer: Defer::new(),
        });

        context::set_scheduler(&cx, || {
            let cx = cx.expect_multi_thread();

            // This should always be an error. It only returns a `Result` to support
            // using `?` to short circuit.
            assert!(cx.run(core).is_err());

            // Check if there are any deferred tasks to notify. This can happen when
            // the worker core is lost due to `block_in_place()` being called from
            // within the task.
            cx.defer.wake();
        });
    });
}

impl Context {
    fn run(&self, mut core: Box<Core>) -> RunResult {
        // Reset `lifo_enabled` here in case the core was previously stolen from
        // a task that had the LIFO slot disabled.
        self.reset_lifo_enabled(&mut core);

        // Start in the "processing tasks" state, as polling tasks from the
        // local queue will be one of the first things we do.
        core.stats.start_processing_scheduled_tasks();

        while !core.is_shutdown {
            self.assert_lifo_enabled_is_correct(&core);

            if core.is_traced {
                core = self.worker.handle.trace_core(core);
            }

            // Increment the tick
            core.tick();

            // Run maintenance, if needed
            core = self.maintenance(core);

            // First, check work available to the current worker.
            if let Some(task) = core.next_task(&self.worker) {
                core = self.run_task(task, core)?;
                continue;
            }

            // We consumed all work in the queues and will start searching for work.
            core.stats.end_processing_scheduled_tasks();

            // There is no more **local** work to process, try to steal work
            // from other workers.
            if let Some(task) = core.steal_work(&self.worker) {
                // Found work, switch back to processing
                core.stats.start_processing_scheduled_tasks();
                core = self.run_task(task, core)?;
            } else {
                // Wait for work
                core = if !self.defer.is_empty() {
                    self.park_timeout(core, Some(Duration::from_millis(0)))
                } else {
                    self.park(core)
                };
                core.stats.start_processing_scheduled_tasks();
            }
        }

        core.pre_shutdown(&self.worker);
        // Signal shutdown
        self.worker.handle.shutdown_core(core);
        Err(())
    }

    fn run_task(&self, task: Notified, mut core: Box<Core>) -> RunResult {
        #[cfg(tokio_unstable)]
        let task_meta = task.task_meta();

        let task = self.worker.handle.shared.owned.assert_owner(task);

        // Make sure the worker is not in the **searching** state. This enables
        // another idle worker to try to steal work.
        core.transition_from_searching(&self.worker);

        self.assert_lifo_enabled_is_correct(&core);

        // Measure the poll start time. Note that we may end up polling other
        // tasks under this measurement. In this case, the tasks came from the
        // LIFO slot and are considered part of the current task for scheduling
        // purposes. These tasks inherit the "parent"'s limits.
        core.stats.start_poll();

        // Make the core available to the runtime context
        *self.core.borrow_mut() = Some(core);

        // Run the task
        coop::budget(|| {
            // Unlike the poll time above, the poll start callback is attached to the
            // task id, so it is tightly associated with the actual poll invocation.
            #[cfg(tokio_unstable)]
            self.worker
                .handle
                .task_hooks
                .poll_start_callback(&task_meta);

            task.run();

            #[cfg(tokio_unstable)]
            self.worker.handle.task_hooks.poll_stop_callback(&task_meta);

            let mut lifo_polls = 0;

            // As long as there is budget remaining and a task exists in the
            // `lifo_slot`, then keep running.
            loop {
                // Check if we still have the core. If not, the core was stolen
                // by another worker.
                let mut core = match self.core.borrow_mut().take() {
                    Some(core) => core,
                    None => {
                        // In this case, we cannot call `reset_lifo_enabled()`
                        // because the core was stolen. The stealer will handle
                        // that at the top of `Context::run`.
                        return Err(());
                    }
                };

                // Check for a task in the LIFO slot
                let task = match core.lifo_slot.take() {
                    Some(task) => task,
                    None => {
                        self.reset_lifo_enabled(&mut core);
                        core.stats.end_poll();
                        return Ok(core);
                    }
                };

                if !coop::has_budget_remaining() {
                    core.stats.end_poll();

                    // Not enough budget left to run the LIFO task, push it to
                    // the back of the queue and return.
                    core.run_queue.push_back_or_overflow(
                        task,
                        &*self.worker.handle,
                        &mut core.stats,
                    );
                    // If we hit this point, the LIFO slot should be enabled.
                    // There is no need to reset it.
                    debug_assert!(core.lifo_enabled);
                    return Ok(core);
                }

                // Track that we are about to run a task from the LIFO slot.
                lifo_polls += 1;
                super::counters::inc_lifo_schedules();

                // Disable the LIFO slot if we reach our limit
                //
                // In ping-pong style workloads where task A notifies task B,
                // which notifies task A again, continuously prioritizing the
                // LIFO slot can cause starvation as these two tasks will
                // repeatedly schedule the other. To mitigate this, we limit the
                // number of times the LIFO slot is prioritized.
                if lifo_polls >= MAX_LIFO_POLLS_PER_TICK {
                    core.lifo_enabled = false;
                    super::counters::inc_lifo_capped();
                }

                // Run the LIFO task, then loop
                *self.core.borrow_mut() = Some(core);
                let task = self.worker.handle.shared.owned.assert_owner(task);

                #[cfg(tokio_unstable)]
                let task_meta = task.task_meta();

                #[cfg(tokio_unstable)]
                self.worker
                    .handle
                    .task_hooks
                    .poll_start_callback(&task_meta);

                task.run();

                #[cfg(tokio_unstable)]
                self.worker.handle.task_hooks.poll_stop_callback(&task_meta);
            }
        })
    }

    fn reset_lifo_enabled(&self, core: &mut Core) {
        core.lifo_enabled = !self.worker.handle.shared.config.disable_lifo_slot;
    }

    fn assert_lifo_enabled_is_correct(&self, core: &Core) {
        debug_assert_eq!(
            core.lifo_enabled,
            !self.worker.handle.shared.config.disable_lifo_slot
        );
    }

    fn maintenance(&self, mut core: Box<Core>) -> Box<Core> {
        if core.tick % self.worker.handle.shared.config.event_interval == 0 {
            super::counters::inc_num_maintenance();

            core.stats.end_processing_scheduled_tasks();

            // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
            // to run without actually putting the thread to sleep.
            core = self.park_timeout(core, Some(Duration::from_millis(0)));

            // Run regularly scheduled maintenance
            core.maintenance(&self.worker);

            core.stats.start_processing_scheduled_tasks();
        }

        core
    }

    /// Parks the worker thread while waiting for tasks to execute.
    ///
    /// This function checks that there is indeed no more work left to be done
    /// before parking. Note that, before parking, the worker thread will try
    /// to take ownership of the driver (I/O, time) and dispatch any events
    /// that might have fired. Whenever a worker thread executes the driver
    /// loop, all woken tasks are scheduled in its own local queue until the
    /// queue saturates (ntasks > `LOCAL_QUEUE_CAPACITY`). When the local queue
    /// is saturated, the overflow tasks are added to the injection queue, from
    /// where other workers can pick them up. We also rely on the work-stealing
    /// algorithm to spread the tasks amongst workers after all the I/O events
    /// get dispatched.
    fn park(&self, mut core: Box<Core>) -> Box<Core> {
        if let Some(f) = &self.worker.handle.shared.config.before_park {
            f();
        }

        if core.transition_to_parked(&self.worker) {
            while !core.is_shutdown && !core.is_traced {
                core.stats.about_to_park();
                core.stats
                    .submit(&self.worker.handle.shared.worker_metrics[self.worker.index]);

                core = self.park_timeout(core, None);

                core.stats.unparked();

                // Run regularly scheduled maintenance
                core.maintenance(&self.worker);

                if core.transition_from_parked(&self.worker) {
                    break;
                }
            }
        }

        if let Some(f) = &self.worker.handle.shared.config.after_unpark {
            f();
        }
        core
    }

    fn park_timeout(&self, mut core: Box<Core>, duration: Option<Duration>) -> Box<Core> {
        self.assert_lifo_enabled_is_correct(&core);

        // Take the parker out of core
        let mut park = core.park.take().expect("park missing");

        // Store `core` in context
        *self.core.borrow_mut() = Some(core);

        // Park thread
        if let Some(timeout) = duration {
            park.park_timeout(&self.worker.handle.driver, timeout);
        } else {
            park.park(&self.worker.handle.driver);
        }

        self.defer.wake();

        // Remove `core` from context
        core = self.core.borrow_mut().take().expect("core missing");

        // Place `park` back in `core`
        core.park = Some(park);

        if core.should_notify_others() {
            self.worker.handle.notify_parked_local();
        }

        core
    }

    pub(crate) fn defer(&self, waker: &Waker) {
        if self.core.borrow().is_none() {
            // If there is no core, then the worker is currently in a block_in_place. In this case,
            // we cannot use the defer queue as we aren't really in the current runtime.
            waker.wake_by_ref();
        } else {
            self.defer.defer(waker);
        }
    }
}

impl Core {
    /// Increment the tick
    fn tick(&mut self) {
        self.tick = self.tick.wrapping_add(1);
    }

    /// Return the next notified task available to this worker.
    fn next_task(&mut self, worker: &Worker) -> Option<Notified> {
        if self.tick % self.global_queue_interval == 0 {
            // Update the global queue interval, if needed
            self.tune_global_queue_interval(worker);

            worker
                .handle
                .next_remote_task()
                .or_else(|| self.next_local_task())
        } else {
            let maybe_task = self.next_local_task();

            if maybe_task.is_some() {
                return maybe_task;
            }

            if worker.inject().is_empty() {
                return None;
            }

            // Other threads can only **remove** tasks from the current worker's
            // `run_queue`. So, we can be confident that by the time we call
            // `run_queue.push_back` below, there will be *at least* `cap`
            // available slots in the queue.
            let cap = usize::min(
                self.run_queue.remaining_slots(),
                self.run_queue.max_capacity() / 2,
            );

            // The worker is currently idle, pull a batch of work from the
            // injection queue. We don't want to pull *all* the work so other
            // workers can also get some.
            let n = usize::min(
                worker.inject().len() / worker.handle.shared.remotes.len() + 1,
                cap,
            );

            // Take at least one task, since the first task is returned directly
            // and not pushed onto the local queue.
            let n = usize::max(1, n);

            let mut synced = worker.handle.shared.synced.lock();
            // safety: passing in the correct `inject::Synced`.
            let mut tasks = unsafe { worker.inject().pop_n(&mut synced.inject, n) };

            // Pop the first task to return immediately
            let ret = tasks.next();

            // Push the rest onto the local run queue
            self.run_queue.push_back(tasks);

            ret
        }
    }
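
    // A hedged, self-contained restatement of the batch-size arithmetic in
    // `next_task` above. The parameter names are illustrative, and this helper
    // is not called by the scheduler; it exists only to document the formula.
    #[allow(dead_code)]
    fn illustrative_batch_size(
        inject_len: usize,
        num_workers: usize,
        remaining_slots: usize,
        max_capacity: usize,
    ) -> usize {
        // Never fill more than half of the local queue from the global queue.
        let cap = usize::min(remaining_slots, max_capacity / 2);
        // Split the global queue roughly evenly across workers...
        let n = usize::min(inject_len / num_workers + 1, cap);
        // ...but take at least one task, since the first task is returned
        // directly rather than pushed. E.g. with 64 queued tasks, 8 workers,
        // 128 free slots, and capacity 256: cap = 128, n = min(64/8 + 1, 128) = 9.
        usize::max(1, n)
    }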

    fn next_local_task(&mut self) -> Option<Notified> {
        self.lifo_slot.take().or_else(|| self.run_queue.pop())
    }

    /// Steals tasks from another worker.
    ///
    /// Note: a worker will only actually try to steal if fewer than half the
    /// workers are already searching for tasks. The idea is to make sure not
    /// all workers try to steal at the same time.
    fn steal_work(&mut self, worker: &Worker) -> Option<Notified> {
        if !self.transition_to_searching(worker) {
            return None;
        }

        let num = worker.handle.shared.remotes.len();
        // Start from a random worker
        let start = self.rand.fastrand_n(num as u32) as usize;

        for i in 0..num {
            let i = (start + i) % num;

            // Don't steal from ourselves! We know we don't have work.
            if i == worker.index {
                continue;
            }

            let target = &worker.handle.shared.remotes[i];
            if let Some(task) = target
                .steal
                .steal_into(&mut self.run_queue, &mut self.stats)
            {
                return Some(task);
            }
        }

        // Fall back to checking the global queue
        worker.handle.next_remote_task()
    }

    fn transition_to_searching(&mut self, worker: &Worker) -> bool {
        if !self.is_searching {
            self.is_searching = worker.handle.shared.idle.transition_worker_to_searching();
        }

        self.is_searching
    }

    fn transition_from_searching(&mut self, worker: &Worker) {
        if !self.is_searching {
            return;
        }

        self.is_searching = false;
        worker.handle.transition_worker_from_searching();
    }

    fn has_tasks(&self) -> bool {
        self.lifo_slot.is_some() || self.run_queue.has_tasks()
    }

    fn should_notify_others(&self) -> bool {
        // If there are tasks available to steal, but this worker is not
        // looking for tasks to steal, notify another worker.
        if self.is_searching {
            return false;
        }
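        // This worker will poll one task itself, so only notify when there is
        // surplus work for another worker (`is_some()` coerces to 0 or 1 here).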
        self.lifo_slot.is_some() as usize + self.run_queue.len() > 1
    }

    /// Prepares the worker state for parking.
    ///
    /// Returns true if the transition happened, false if there is work to do first.
    fn transition_to_parked(&mut self, worker: &Worker) -> bool {
        // Workers should not park if they have work to do
        if self.has_tasks() || self.is_traced {
            return false;
        }

        // When the final worker transitions **out** of searching to parked, it
        // must check all the queues one last time in case work materialized
        // between the last work scan and transitioning out of searching.
        let is_last_searcher = worker.handle.shared.idle.transition_worker_to_parked(
            &worker.handle.shared,
            worker.index,
            self.is_searching,
        );

        // The worker is no longer searching. This only updates the local
        // cache.
        self.is_searching = false;

        if is_last_searcher {
            worker.handle.notify_if_work_pending();
        }

        true
    }

    /// Returns `true` if the transition happened.
    fn transition_from_parked(&mut self, worker: &Worker) -> bool {
        // If a task is in the lifo slot/run queue, then we must unpark regardless of
        // being notified
        if self.has_tasks() {
            // When a worker wakes, it should only transition to the "searching"
            // state when the wake originates from another worker *or* a new task
            // is pushed. We do *not* want the worker to transition to "searching"
            // when it wakes when the I/O driver receives new events.
            self.is_searching = !worker
                .handle
                .shared
                .idle
                .unpark_worker_by_id(&worker.handle.shared, worker.index);
            return true;
        }

        if worker
            .handle
            .shared
            .idle
            .is_parked(&worker.handle.shared, worker.index)
        {
            return false;
        }

        // When unparked, the worker is in the searching state.
        self.is_searching = true;
        true
    }

    /// Runs maintenance work such as checking the pool's state.
    fn maintenance(&mut self, worker: &Worker) {
        self.stats
            .submit(&worker.handle.shared.worker_metrics[worker.index]);

        if !self.is_shutdown {
            // Check if the scheduler has been shut down
            let synced = worker.handle.shared.synced.lock();
            self.is_shutdown = worker.inject().is_closed(&synced.inject);
        }

        if !self.is_traced {
            // Check if the worker should be tracing.
            self.is_traced = worker.handle.shared.trace_status.trace_requested();
        }
    }

    /// Signals all tasks to shut down and waits for them to complete. Must run
    /// before we enter the single-threaded phase of shutdown processing.
    fn pre_shutdown(&mut self, worker: &Worker) {
        // Start from a random inner list
        let start = self
            .rand
            .fastrand_n(worker.handle.shared.owned.get_shard_size() as u32);
        // Signal all tasks to shut down.
        worker
            .handle
            .shared
            .owned
            .close_and_shutdown_all(start as usize);

        self.stats
            .submit(&worker.handle.shared.worker_metrics[worker.index]);
    }

    /// Shuts down the core.
    fn shutdown(&mut self, handle: &Handle) {
        // Take the parker out of the core
        let mut park = self.park.take().expect("park missing");

        // Drain the queue
        while self.next_local_task().is_some() {}

        park.shutdown(&handle.driver);
    }

    fn tune_global_queue_interval(&mut self, worker: &Worker) {
        let next = self
            .stats
            .tuned_global_queue_interval(&worker.handle.shared.config);

        // Smooth out jitter
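        // e.g. a change from an interval of 31 to 33 is ignored
        // (abs_diff == 2), while a change from 31 to 40 is applied; the
        // numbers are illustrative.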
        if u32::abs_diff(self.global_queue_interval, next) > 2 {
            self.global_queue_interval = next;
        }
    }
}

impl Worker {
    /// Returns a reference to the scheduler's injection queue.
    fn inject(&self) -> &inject::Shared<Arc<Handle>> {
        &self.handle.shared.inject
    }
}

impl Handle {
    pub(super) fn schedule_task(&self, task: Notified, is_yield: bool) {
        with_current(|maybe_cx| {
            if let Some(cx) = maybe_cx {
                // Make sure the task is part of the **current** scheduler.
                if self.ptr_eq(&cx.worker.handle) {
                    // And the current thread still holds a core
                    if let Some(core) = cx.core.borrow_mut().as_mut() {
                        self.schedule_local(core, task, is_yield);
                        return;
                    }
                }
            }

            // Otherwise, use the inject queue.
            self.push_remote_task(task);
            self.notify_parked_remote();
        });
    }

    pub(super) fn schedule_option_task_without_yield(&self, task: Option<Notified>) {
        if let Some(task) = task {
            self.schedule_task(task, false);
        }
    }

    fn schedule_local(&self, core: &mut Core, task: Notified, is_yield: bool) {
        core.stats.inc_local_schedule_count();

        // Spawning from the worker thread. If scheduling a "yield" then the
        // task must always be pushed to the back of the queue, enabling other
        // tasks to be executed. If **not** a yield, then there is more
        // flexibility and the task may go to the front of the queue.
        let should_notify = if is_yield || !core.lifo_enabled {
            core.run_queue
                .push_back_or_overflow(task, self, &mut core.stats);
            true
        } else {
            // Push to the LIFO slot
            let prev = core.lifo_slot.take();
            let ret = prev.is_some();

            if let Some(prev) = prev {
                core.run_queue
                    .push_back_or_overflow(prev, self, &mut core.stats);
            }

            core.lifo_slot = Some(task);

            ret
        };

        // Only notify if not currently parked. If `park` is `None`, then the
        // scheduling is from a resource driver. As notifications often come in
        // batches, the notification is delayed until the park is complete.
        if should_notify && core.park.is_some() {
            self.notify_parked_local();
        }
    }

    fn next_remote_task(&self) -> Option<Notified> {
        if self.shared.inject.is_empty() {
            return None;
        }

        let mut synced = self.shared.synced.lock();
        // safety: passing in the correct `inject::Synced`
        unsafe { self.shared.inject.pop(&mut synced.inject) }
    }

    fn push_remote_task(&self, task: Notified) {
        self.shared.scheduler_metrics.inc_remote_schedule_count();

        let mut synced = self.shared.synced.lock();
        // safety: passing in the correct `inject::Synced`
        unsafe {
            self.shared.inject.push(&mut synced.inject, task);
        }
    }

    pub(super) fn close(&self) {
        if self
            .shared
            .inject
            .close(&mut self.shared.synced.lock().inject)
        {
            self.notify_all();
        }
    }

    fn notify_parked_local(&self) {
        super::counters::inc_num_inc_notify_local();

        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
            super::counters::inc_num_unparks_local();
            self.shared.remotes[index].unpark.unpark(&self.driver);
        }
    }

    fn notify_parked_remote(&self) {
        if let Some(index) = self.shared.idle.worker_to_notify(&self.shared) {
            self.shared.remotes[index].unpark.unpark(&self.driver);
        }
    }

    pub(super) fn notify_all(&self) {
        for remote in &self.shared.remotes[..] {
            remote.unpark.unpark(&self.driver);
        }
    }

    fn notify_if_work_pending(&self) {
        for remote in &self.shared.remotes[..] {
            if !remote.steal.is_empty() {
                self.notify_parked_local();
                return;
            }
        }

        if !self.shared.inject.is_empty() {
            self.notify_parked_local();
        }
    }

    fn transition_worker_from_searching(&self) {
        if self.shared.idle.transition_worker_from_searching() {
            // We are the final searching worker. Because work was found, we
            // need to notify another worker.
            self.notify_parked_local();
        }
    }

    /// Signals that a worker has observed the shutdown signal and has pushed
    /// its core to `Shared::shutdown_cores`.
    ///
    /// If all workers have reached this point, the final cleanup is performed.
    fn shutdown_core(&self, core: Box<Core>) {
        let mut cores = self.shared.shutdown_cores.lock();
        cores.push(core);

        if cores.len() != self.shared.remotes.len() {
            return;
        }

        debug_assert!(self.shared.owned.is_empty());

        for mut core in cores.drain(..) {
            core.shutdown(self);
        }

        // Drain the injection queue
        //
        // We already shut down every task, so we can simply drop the tasks.
        while let Some(task) = self.next_remote_task() {
            drop(task);
        }
    }

    fn ptr_eq(&self, other: &Handle) -> bool {
        std::ptr::eq(self, other)
    }
}

impl Overflow<Arc<Handle>> for Handle {
    fn push(&self, task: task::Notified<Arc<Handle>>) {
        self.push_remote_task(task);
    }

    fn push_batch<I>(&self, iter: I)
    where
        I: Iterator<Item = task::Notified<Arc<Handle>>>,
    {
        unsafe {
            self.shared.inject.push_batch(self, iter);
        }
    }
}

pub(crate) struct InjectGuard<'a> {
    lock: crate::loom::sync::MutexGuard<'a, Synced>,
}

impl<'a> AsMut<inject::Synced> for InjectGuard<'a> {
    fn as_mut(&mut self) -> &mut inject::Synced {
        &mut self.lock.inject
    }
}

impl<'a> Lock<inject::Synced> for &'a Handle {
    type Handle = InjectGuard<'a>;

    fn lock(self) -> Self::Handle {
        InjectGuard {
            lock: self.shared.synced.lock(),
        }
    }
}

#[track_caller]
fn with_current<R>(f: impl FnOnce(Option<&Context>) -> R) -> R {
    use scheduler::Context::MultiThread;

    context::with_scheduler(|ctx| match ctx {
        Some(MultiThread(ctx)) => f(Some(ctx)),
        _ => f(None),
    })
}