use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::sync::atomic::Ordering::{self, Relaxed, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};

use super::abort::abort;
use super::ReadyToRunQueue;
use crate::task::ArcWake;

pub(super) struct Task<Fut> {
    // The future
    pub(super) future: UnsafeCell<Option<Fut>>,

    // Next pointer for linked list tracking all active tasks (use
    // `spin_next_all` to read when access is shared across threads)
    pub(super) next_all: AtomicPtr<Task<Fut>>,

    // Previous task in linked list tracking all active tasks
    pub(super) prev_all: UnsafeCell<*const Task<Fut>>,

    // Length of the linked list tracking all active tasks when this node was
    // inserted (use `spin_next_all` to synchronize before reading when access
    // is shared across threads)
    pub(super) len_all: UnsafeCell<usize>,

    // Next pointer in ready to run queue
    pub(super) next_ready_to_run: AtomicPtr<Task<Fut>>,

    // Queue that we'll be enqueued to when woken
    pub(super) ready_to_run_queue: Weak<ReadyToRunQueue<Fut>>,

    // Whether or not this task is currently in the ready to run queue
    pub(super) queued: AtomicBool,

    // Whether the future was awoken during polling.
    // It is possible for this flag to be set to `true` after polling has
    // completed, in which case it is ignored.
    pub(super) woken: AtomicBool,
}

// `Task` can be sent across threads safely because it ensures that
// the underlying `Fut` type isn't touched from any of its methods.
//
// The parent (`super`) module is trusted not to access `future`
// across different threads.
unsafe impl<Fut> Send for Task<Fut> {}
unsafe impl<Fut> Sync for Task<Fut> {}

impl<Fut> ArcWake for Task<Fut> {
    fn wake_by_ref(arc_self: &Arc<Self>) {
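        // If the `FuturesUnordered` that owns this task has already been
        // dropped, the ready-to-run queue is gone and this wakeup is a no-op.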
        let inner = match arc_self.ready_to_run_queue.upgrade() {
            Some(inner) => inner,
            None => return,
        };
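        // Record the wakeup for the poll loop. Per the field docs, `woken` is
        // only an advisory flag read during polling (late stores are
        // deliberately ignored), so `Relaxed` ordering is sufficient here.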
        arc_self.woken.store(true, Relaxed);

        // It's our job to enqueue this task into the ready to run queue. To
        // do this we set the `queued` flag, and if successful we then do the
        // actual queueing operation, ensuring that we're only queued once.
        //
        // Once the task is inserted call `wake` to notify the parent task,
        // as it'll want to come along and run our task later.
        //
        // Note that we don't change the reference count of the task here,
        // we merely enqueue the raw pointer. The `FuturesUnordered`
        // implementation guarantees that if we set the `queued` flag that
        // there's a reference count held by the main `FuturesUnordered` queue
        // still.
        let prev = arc_self.queued.swap(true, SeqCst);
        if !prev {
            inner.enqueue(Arc::as_ptr(arc_self));
            inner.waker.wake();
        }
    }
}

impl<Fut> Task<Fut> {
    /// Returns a waker reference for this task without cloning the Arc.
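    ///
    /// # Safety
    ///
    /// As with the inner `waker_ref::waker_ref`, the caller must guarantee
    /// that the returned waker is not used after this task's `Arc` is freed.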
    pub(super) unsafe fn waker_ref(this: &Arc<Self>) -> waker_ref::WakerRef<'_> {
        unsafe { waker_ref::waker_ref(this) }
    }

    /// Spins until `next_all` is no longer set to `pending_next_all`.
    ///
    /// The temporary `pending_next_all` value is typically overwritten fairly
    /// quickly after a node is inserted into the list of all futures, so this
    /// should rarely spin much.
    ///
    /// When it returns, the correct `next_all` value is returned.
    ///
    /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be
    /// used before `len_all` can be safely read.
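    ///
    /// A minimal caller-side sketch (illustrative only; this is not the
    /// actual `FuturesUnordered` list-walk code, and `pending_next_all` here
    /// is whatever sentinel pointer the parent module uses):
    ///
    /// ```ignore
    /// // Synchronize with a concurrent insert before touching `len_all`.
    /// let next = task.spin_next_all(pending_next_all, Acquire);
    /// // SAFETY: the `Acquire` load above makes this read well-defined.
    /// let len = unsafe { *task.len_all.get() };
    /// ```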
    #[inline]
    pub(super) fn spin_next_all(
        &self,
        pending_next_all: *mut Self,
        ordering: Ordering,
    ) -> *const Self {
        loop {
            let next = self.next_all.load(ordering);
            if next != pending_next_all {
                return next;
            }
        }
    }
}

impl<Fut> Drop for Task<Fut> {
    fn drop(&mut self) {
        // Since `Task<Fut>` is sent across all threads for any lifetime,
        // regardless of `Fut`, to guarantee memory safety we can't actually
        // touch `Fut` at any time except when we have a reference to the
        // `FuturesUnordered` itself.
        //
        // Consequently it *should* be the case that we always drop futures
        // from the `FuturesUnordered` instance. This is a bomb, just in case
        // there's a bug in that logic.
        unsafe {
            if (*self.future.get()).is_some() {
                abort("future still here when dropping");
            }
        }
    }
}

mod waker_ref {
    use alloc::sync::Arc;
    use core::marker::PhantomData;
    use core::mem;
    use core::mem::ManuallyDrop;
    use core::ops::Deref;
    use core::task::{RawWaker, RawWakerVTable, Waker};
    use futures_task::ArcWake;

    pub(crate) struct WakerRef<'a> {
        waker: ManuallyDrop<Waker>,
        _marker: PhantomData<&'a ()>,
    }

    impl WakerRef<'_> {
        #[inline]
        fn new_unowned(waker: ManuallyDrop<Waker>) -> Self {
            Self { waker, _marker: PhantomData }
        }
    }

    impl Deref for WakerRef<'_> {
        type Target = Waker;

        #[inline]
        fn deref(&self) -> &Waker {
            &self.waker
        }
    }

    /// Copy of `futures_task::waker_ref` without the `W: 'static` bound.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that use-after-free will not occur.
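    ///
    /// A minimal usage sketch (hypothetical caller; `task` and `F` are
    /// illustrative names, not part of this module):
    ///
    /// ```ignore
    /// let task: Arc<Task<F>> = ...;
    /// // SAFETY: `waker` is dropped before `task`, so no use-after-free.
    /// let waker = unsafe { waker_ref(&task) };
    /// let mut cx = Context::from_waker(&waker);
    /// ```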
    #[inline]
    pub(crate) unsafe fn waker_ref<W>(wake: &Arc<W>) -> WakerRef<'_>
    where
        W: ArcWake,
    {
        // simply copy the pointer instead of using Arc::into_raw,
        // as we don't actually keep a refcount by using ManuallyDrop.
        let ptr = Arc::as_ptr(wake).cast::<()>();

        let waker =
            ManuallyDrop::new(unsafe { Waker::from_raw(RawWaker::new(ptr, waker_vtable::<W>())) });
        WakerRef::new_unowned(waker)
    }

    fn waker_vtable<W: ArcWake>() -> &'static RawWakerVTable {
        &RawWakerVTable::new(
            clone_arc_raw::<W>,
            wake_arc_raw::<W>,
            wake_by_ref_arc_raw::<W>,
            drop_arc_raw::<W>,
        )
    }

    // FIXME: panics on Arc::clone / refcount changes could wreak havoc on the
    // code here. We should guard against this by aborting.

    unsafe fn increase_refcount<T: ArcWake>(data: *const ()) {
        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
        let arc = mem::ManuallyDrop::new(unsafe { Arc::<T>::from_raw(data.cast::<T>()) });
        // Now increase refcount, but don't drop new refcount either
        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
    }

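    // Clone: bump the strong count so the new `RawWaker` owns its own
    // reference, then hand back a waker over the same data pointer.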
    unsafe fn clone_arc_raw<T: ArcWake>(data: *const ()) -> RawWaker {
        unsafe { increase_refcount::<T>(data) }
        RawWaker::new(data, waker_vtable::<T>())
    }

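    // Wake-by-value: reconstitute the `Arc` and let `ArcWake::wake` consume
    // the reference count owned by the `RawWaker`.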
    unsafe fn wake_arc_raw<T: ArcWake>(data: *const ()) {
        let arc: Arc<T> = unsafe { Arc::from_raw(data.cast::<T>()) };
        ArcWake::wake(arc);
    }

    unsafe fn wake_by_ref_arc_raw<T: ArcWake>(data: *const ()) {
        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
        let arc = mem::ManuallyDrop::new(unsafe { Arc::<T>::from_raw(data.cast::<T>()) });
        ArcWake::wake_by_ref(&arc);
    }

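    // Drop: release the reference count owned by the `RawWaker` by dropping
    // the reconstituted `Arc`.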
    unsafe fn drop_arc_raw<T: ArcWake>(data: *const ()) {
        drop(unsafe { Arc::<T>::from_raw(data.cast::<T>()) })
    }
}