use super::{Shared, Synced};

use crate::runtime::scheduler::Lock;
use crate::runtime::task;

use std::sync::atomic::Ordering::Release;

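// A `&mut Synced` already grants exclusive access to the synced state, so
// "locking" it is a no-op: the mutable reference itself serves as the handle.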
impl<'a> Lock<Synced> for &'a mut Synced {
    type Handle = &'a mut Synced;

    fn lock(self) -> Self::Handle {
        self
    }
}

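// The handle returned by `Lock::lock` is used through `AsMut<Synced>` (see
// `push_batch_inner` below), so the bare `&mut Synced` handle needs this
// identity impl.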
impl AsMut<Synced> for Synced {
    fn as_mut(&mut self) -> &mut Synced {
        self
    }
}

impl<T: 'static> Shared<T> {
    /// Pushes several values into the queue.
    ///
    /// # Safety
    ///
    /// Must be called with the same `Synced` instance returned by `Inject::new`.
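    ///
    /// A sketch of the intended call pattern, assuming the caller owns the
    /// paired `Synced` and can hand out a `&mut Synced` as the lock (the
    /// identifiers below are illustrative, not real bindings):
    ///
    /// ```ignore
    /// // `shared` and `synced` are the two halves created together when the
    /// // inject queue was constructed; `tasks` yields `task::Notified<T>`.
    /// unsafe {
    ///     shared.push_batch(&mut synced, tasks.into_iter());
    /// }
    /// ```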
    #[inline]
    pub(crate) unsafe fn push_batch<L, I>(&self, shared: L, mut iter: I)
    where
        L: Lock<Synced>,
        I: Iterator<Item = task::Notified<T>>,
    {
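        // Take the first task to seed the chain; an empty batch is a no-op.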
        let first = match iter.next() {
            Some(first) => first.into_raw(),
            None => return,
        };

        // Link up all the tasks.
        let mut prev = first;
        let mut counter = 1;

        // We are going to be called with an `std::iter::Chain`, and that
        // iterator overrides `for_each` to something that is easier for the
        // compiler to optimize than a loop.
        iter.for_each(|next| {
            let next = next.into_raw();

            // safety: Holding the Notified for a task guarantees exclusive
            // access to the `queue_next` field.
            unsafe { prev.set_queue_next(Some(next)) };
            prev = next;
            counter += 1;
        });

        // Now that the tasks are linked together, insert them into the
        // linked list.
        self.push_batch_inner(shared, first, prev, counter);
    }

    /// Inserts several tasks that have been linked together into the queue.
    ///
    /// The provided head and tail may be the same task. In this case, a
    /// single task is inserted.
    #[inline]
    unsafe fn push_batch_inner<L>(
        &self,
        shared: L,
        batch_head: task::RawTask,
        batch_tail: task::RawTask,
        num: usize,
    ) where
        L: Lock<Synced>,
    {
        debug_assert!(unsafe { batch_tail.get_queue_next().is_none() });

        let mut synced = shared.lock();
        let synced = synced.as_mut();

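        // Append the batch to the tail of the intrusive list. If the queue is
        // currently empty, the batch head becomes the new head.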
        if let Some(tail) = synced.tail {
            unsafe {
                tail.set_queue_next(Some(batch_head));
            }
        } else {
            synced.head = Some(batch_head);
        }

        synced.tail = Some(batch_tail);

        // Increment the count.
        //
        // safety: All updates to the len atomic are guarded by the mutex. As
        // such, a non-atomic load followed by a store is safe.
        let len = self.len.unsync_load();

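        // `Release` ensures the linked-list writes above are visible to any
        // thread that observes the updated length with an `Acquire` load.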
        self.len.store(len + num, Release);
    }
}