use crate::task::AtomicWaker;
use alloc::sync::Arc;
use core::cell::UnsafeCell;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};

use super::abort::abort;
use super::task::Task;

pub(super) enum Dequeue<Fut> {
    Data(*const Task<Fut>),
    Empty,
    Inconsistent,
}

pub(super) struct ReadyToRunQueue<Fut> {
    // The waker of the task using `FuturesUnordered`.
    pub(super) waker: AtomicWaker,

    // Head/tail of the readiness queue
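    // Producers push onto `head` from any thread via an atomic swap; only the
    // single consumer reads or writes `tail`. `stub` is a dummy node owned by
    // the queue so the intrusive list is never empty.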
    pub(super) head: AtomicPtr<Task<Fut>>,
    pub(super) tail: UnsafeCell<*const Task<Fut>>,
    pub(super) stub: Arc<Task<Fut>>,
}

/// An MPSC queue into which the tasks containing the futures are inserted
/// whenever the future inside is scheduled for polling.
impl<Fut> ReadyToRunQueue<Fut> {
    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
    pub(super) fn enqueue(&self, task: *const Task<Fut>) {
        unsafe {
            debug_assert!((*task).queued.load(Relaxed));

            // This action does not require any coordination
            (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);

            // Note that these atomic orderings come from 1024cores
            let task = task as *mut _;
            let prev = self.head.swap(task, AcqRel);
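            // Between the swap above and the store below, `head` already
            // points at the new node but `prev` is not yet linked to it; a
            // `dequeue` running concurrently on the consumer thread may
            // observe this window as `Dequeue::Inconsistent`.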
            (*prev).next_ready_to_run.store(task, Release);
        }
    }

    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm.
    ///
    /// Note that this is unsafe as it requires mutual exclusion (only one
    /// thread can call this) to be guaranteed elsewhere.
    pub(super) unsafe fn dequeue(&self) -> Dequeue<Fut> {
        let mut tail = *self.tail.get();
        let mut next = (*tail).next_ready_to_run.load(Acquire);

        if tail == self.stub() {
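            // The consumer is parked on the stub node. If nothing has been
            // linked after it the queue is empty; otherwise step past the stub
            // to the first real task.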
            if next.is_null() {
                return Dequeue::Empty;
            }

            *self.tail.get() = next;
            tail = next;
            next = (*next).next_ready_to_run.load(Acquire);
        }

        if !next.is_null() {
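            // Fast path: `tail` has a linked successor, so hand out `tail` and
            // advance the consumer's tail pointer to that successor.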
            *self.tail.get() = next;
            debug_assert!(tail != self.stub());
            return Dequeue::Data(tail);
        }

        if self.head.load(Acquire) as *const _ != tail {
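            // `tail` has no visible successor, yet `head` has moved past it: a
            // producer is between its swap and its link store, so the queue is
            // momentarily inconsistent and the caller must retry later.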
            return Dequeue::Inconsistent;
        }

        self.enqueue(self.stub());
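        // Only the last real task remains (`head == tail`), so the stub is
        // pushed back onto the queue to take over as tail once that task is
        // handed out below.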

        next = (*tail).next_ready_to_run.load(Acquire);

        if !next.is_null() {
            *self.tail.get() = next;
            return Dequeue::Data(tail);
        }

        Dequeue::Inconsistent
    }

    pub(super) fn stub(&self) -> *const Task<Fut> {
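        // Borrow a raw pointer to the stub node; `Arc::as_ptr` does not affect
        // the reference count.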
        Arc::as_ptr(&self.stub)
    }

    // Clear the queue of tasks.
    //
    // Note that each task has a strong reference count associated with it
    // which is owned by the ready to run queue. This method just pulls out
    // tasks and drops their refcounts.
    //
    // # Safety
    //
    // - All tasks **must** have had their futures dropped already (by `FuturesUnordered::clear`)
    // - The caller **must** guarantee unique access to `self`
    pub(crate) unsafe fn clear(&self) {
        loop {
            // SAFETY: We have the guarantee of mutual exclusion required by `dequeue`.
            match self.dequeue() {
                Dequeue::Empty => break,
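                // With unique access guaranteed by the caller, no producer can
                // be mid-enqueue, so the inconsistent state should be
                // unreachable here; abort rather than continue with a broken
                // queue.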
                Dequeue::Inconsistent => abort("inconsistent in drop"),
                Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)),
            }
        }
    }
}

impl<Fut> Drop for ReadyToRunQueue<Fut> {
    fn drop(&mut self) {
        // Once we're in the destructor for `ReadyToRunQueue<Fut>` we need to
        // clear out the ready to run queue of tasks if there's anything left
        // in there.

        // All tasks have had their futures dropped already by the `FuturesUnordered`
        // destructor above, and we have &mut self, so this is safe.
        unsafe {
            self.clear();
        }
    }
}