use core::sync::atomic::{AtomicU32, Ordering};

#[derive(Clone, Copy)]
pub(crate) struct Token(());

/// Creates a token and passes it to the closure.
///
/// This is a no-op replacement for `CriticalSection::with` because we don't need any locking.
pub(crate) fn locked<R>(f: impl FnOnce(Token) -> R) -> R {
    f(Token(()))
}

/// Task is spawned (has a future)
pub(crate) const STATE_SPAWNED: u32 = 1 << 0;
/// Task is in the executor run queue
pub(crate) const STATE_RUN_QUEUED: u32 = 1 << 1;

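// Taken together, the two bits form a small state machine (a summary for
// orientation; the exact transitions live in the methods below):
//
//   0                      idle slot, available to `spawn`
//   SPAWNED                spawned, not currently in the run queue
//   SPAWNED | RUN_QUEUED   spawned and waiting in the run queue
//   RUN_QUEUED             stale wake of a despawned task; drained by
//                          `run_dequeue`, which reports it as not spawned
//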
pub(crate) struct State {
    state: AtomicU32,
}

impl State {
    pub const fn new() -> State {
        Self {
            state: AtomicU32::new(0),
        }
    }

    /// If task is idle, mark it as spawned + run_queued and return true.
    #[inline(always)]
    pub fn spawn(&self) -> bool {
        self.state
            .compare_exchange(0, STATE_SPAWNED | STATE_RUN_QUEUED, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    /// Unmark the task as spawned.
    #[inline(always)]
    pub fn despawn(&self) {
        self.state.fetch_and(!STATE_SPAWNED, Ordering::AcqRel);
    }

    /// Mark the task as run-queued and run the given function if the task
    /// wasn't already run-queued.
    #[inline(always)]
    pub fn run_enqueue(&self, f: impl FnOnce(Token)) {
        let prev = self.state.fetch_or(STATE_RUN_QUEUED, Ordering::AcqRel);
        // Run `f` only on the 0 -> 1 transition of RUN_QUEUED, so the task
        // is enqueued at most once until it's dequeued again.
        if prev & STATE_RUN_QUEUED == 0 {
            locked(f);
        }
    }

    /// Unmark the task as run-queued. Return whether the task is spawned.
    #[inline(always)]
    pub fn run_dequeue(&self) -> bool {
        // `fetch_and` returns the previous value, which tells us whether the
        // task was still spawned when it was pulled off the run queue.
        let prev = self.state.fetch_and(!STATE_RUN_QUEUED, Ordering::AcqRel);
        prev & STATE_SPAWNED != 0
    }
}
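
// A minimal sketch of the intended lifecycle, for illustration only: the
// test below is not part of the original source. It assumes the semantics
// documented above (`spawn`/`despawn` toggle SPAWNED, enqueue/dequeue
// toggle RUN_QUEUED) and exercises the transitions in order.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn spawn_enqueue_dequeue_roundtrip() {
        let state = State::new();

        // An idle task (state == 0) can be spawned exactly once; spawning
        // sets SPAWNED | RUN_QUEUED in a single compare-exchange.
        assert!(state.spawn());
        assert!(!state.spawn()); // already spawned, the CAS from 0 fails

        // The task is already run-queued, so `f` must not run again.
        state.run_enqueue(|_token| panic!("must not enqueue twice"));

        // Dequeuing clears RUN_QUEUED and reports the task as spawned.
        assert!(state.run_dequeue());

        // With RUN_QUEUED clear, a wake enqueues the task and runs `f` once.
        let mut ran = false;
        state.run_enqueue(|_token| ran = true);
        assert!(ran);

        // Finish: dequeue, then despawn. The state returns to 0, so the
        // slot is reusable by a fresh `spawn`.
        assert!(state.run_dequeue());
        state.despawn();
        assert!(state.spawn());
    }
}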