use core::ptr;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicPtr, Ordering};

use super::{TaskHeader, TaskRef};
use crate::raw::util::SyncUnsafeCell;

pub(crate) struct RunQueueItem {
    next: SyncUnsafeCell<Option<TaskRef>>,
}

impl RunQueueItem {
    pub const fn new() -> Self {
        Self {
            next: SyncUnsafeCell::new(None),
        }
    }
}

/// Atomic task queue using a very, very simple lock-free linked-list queue:
///
/// To enqueue a task, `task.next` is set to the old head, and head is atomically set to `task`.
///
/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
/// null. Then the batch is iterated following the next pointers until null is reached.
///
/// Note that batches will be iterated in the reverse of the order in which they were enqueued.
/// This is OK for our purposes: it can't create fairness problems, since the next batch won't
/// run until the current batch is completely processed, so even a task that enqueues itself
/// instantly (for example, by waking its own waker) can't prevent other tasks from running.
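///
/// A minimal, self-contained sketch of the same push/drain pattern (the `Node`
/// type and the free functions here are hypothetical stand-ins, not the real
/// `TaskHeader` machinery):
///
/// ```
/// use core::ptr;
/// use core::sync::atomic::{AtomicPtr, Ordering};
///
/// struct Node {
///     next: *mut Node,
/// }
///
/// // Enqueue: point the new node at the old head, then swing head to the new node.
/// fn push(head: &AtomicPtr<Node>, node: *mut Node) {
///     head.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |prev| {
///         // The closure may run several times if the CAS loses a race;
///         // rewriting `next` on each attempt is harmless.
///         unsafe { (*node).next = prev };
///         Some(node)
///     })
///     .ok();
/// }
///
/// // Dequeue: steal the whole batch with one swap, then walk it.
/// fn drain(head: &AtomicPtr<Node>, mut f: impl FnMut(*mut Node)) {
///     let mut cur = head.swap(ptr::null_mut(), Ordering::AcqRel);
///     while !cur.is_null() {
///         // Read `next` before calling `f`, in case `f` re-pushes `cur`.
///         let next = unsafe { (*cur).next };
///         f(cur);
///         cur = next;
///     }
/// }
///
/// let head = AtomicPtr::new(ptr::null_mut());
/// let mut a = Node { next: ptr::null_mut() };
/// let mut b = Node { next: ptr::null_mut() };
/// push(&head, &mut a);
/// push(&head, &mut b);
/// let mut seen = Vec::new();
/// drain(&head, |n| seen.push(n));
/// // The batch comes out in reverse push order: b first, then a.
/// assert_eq!(seen, [&mut b as *mut Node, &mut a as *mut Node]);
/// ```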
pub(crate) struct RunQueue {
    head: AtomicPtr<TaskHeader>,
}

impl RunQueue {
    pub const fn new() -> Self {
        Self {
            head: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Enqueues a task. Returns `true` if the queue was empty before this call.
    ///
    /// # Safety
    ///
    /// `task` must NOT already be enqueued in any queue.
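    ///
    /// The return value lets the caller act only on the empty-to-non-empty
    /// transition. A hypothetical caller (`pender`, `task` and `token` are
    /// illustrative names, not the real executor API):
    ///
    /// ```ignore
    /// if unsafe { run_queue.enqueue(task, token) } {
    ///     // The queue went from empty to non-empty, so nobody has been told
    ///     // to poll yet: notify whatever drives `dequeue_all`.
    ///     pender.pend();
    /// }
    /// ```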
    #[inline(always)]
    pub(crate) unsafe fn enqueue(&self, task: TaskRef, _: super::state::Token) -> bool {
        let mut was_empty = false;

        self.head
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |prev| {
                was_empty = prev.is_null();
                unsafe {
                    // safety: the pointer is either null or valid
                    let prev = NonNull::new(prev).map(|ptr| TaskRef::from_ptr(ptr.as_ptr()));
                    // safety: there are no concurrent accesses to `next`
                    task.header().run_queue_item.next.set(prev);
                }
                Some(task.as_ptr() as *mut _)
            })
            .ok();

        was_empty
    }

    /// Empty the queue, then call `on_task` for each task that was in the queue.
    /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
    /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
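    ///
    /// A hypothetical poll loop built on this (`poll_task` is an illustrative
    /// name, not a real function in this crate):
    ///
    /// ```ignore
    /// run_queue.dequeue_all(|task| {
    ///     // Each task is handled at most once per batch; if polling wakes the
    ///     // task again, it re-enqueues itself and waits for the next batch.
    ///     poll_task(task);
    /// });
    /// ```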
    pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
        // Atomically empty the queue.
        let ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel);

        // safety: the pointer is either null or valid
        let mut next = unsafe { NonNull::new(ptr).map(|ptr| TaskRef::from_ptr(ptr.as_ptr())) };

        // Iterate the linked list of tasks that were previously in the queue.
        while let Some(task) = next {
            // If the task re-enqueues itself, the `next` pointer will get overwritten.
            // Therefore, first read the next pointer, and only then process the task.
            // safety: there are no concurrent accesses to `next`
            next = unsafe { task.header().run_queue_item.next.get() };

            task.header().state.run_dequeue();
            on_task(task);
        }
    }
}