use crate::cfg;
use crate::sync::atomic::{AtomicUsize, Ordering};
use std::{fmt, marker::PhantomData};

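/// A lock-free stack of slot indices, used to transfer freed slots between
/// threads.
///
/// `push` installs a new head with a CAS loop, while `pop_all` detaches the
/// entire list in a single atomic swap, so the stack is always drained in
/// bulk rather than popped one element at a time.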
pub(super) struct TransferStack<C = cfg::DefaultConfig> {
    head: AtomicUsize,
    _cfg: PhantomData<fn(C)>,
}

impl<C: cfg::Config> TransferStack<C> {
    pub(super) fn new() -> Self {
        Self {
            head: AtomicUsize::new(super::Addr::<C>::NULL),
            _cfg: PhantomData,
        }
    }

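    /// Takes the entire stack in a single atomic swap, returning the index of
    /// the previous head, or `None` if the stack was empty.
    ///
    /// The `Acquire` swap synchronizes with the `Release` CAS in `push`, so
    /// writes performed in a pusher's `before` closure are visible to the
    /// caller.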
    pub(super) fn pop_all(&self) -> Option<usize> {
        let val = self.head.swap(super::Addr::<C>::NULL, Ordering::Acquire);
        test_println!("-> pop {:#x}", val);
        if val == super::Addr::<C>::NULL {
            None
        } else {
            Some(val)
        }
    }

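    /// Pushes `new_head` onto the stack, invoking `before` with the current
    /// head so the caller can link the new slot to it before the CAS makes
    /// the slot reachable.
    ///
    /// The successful CAS uses `Release` ordering so that everything written
    /// in `before` happens-before a subsequent `pop_all`.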
    fn push(&self, new_head: usize, before: impl Fn(usize)) {
        // We loop to win the race to set the new head. The `next` variable
        // is the next slot on the stack which needs to be pointed to by the
        // new head.
        let mut next = self.head.load(Ordering::Relaxed);
        loop {
            test_println!("-> next {:#x}", next);
            before(next);

            match self
                .head
                .compare_exchange(next, new_head, Ordering::Release, Ordering::Relaxed)
            {
                // lost the race!
                Err(actual) => {
                    test_println!("-> retry!");
                    next = actual;
                }
                Ok(_) => {
                    test_println!("-> successful; next={:#x}", next);
                    return;
                }
            }
        }
    }
}

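// The `FreeList` implementation links the pushed slot to the previous head
// via `Slot::set_next` before the new head is published.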
impl<C: cfg::Config> super::FreeList<C> for TransferStack<C> {
    fn push<T>(&self, new_head: usize, slot: &super::Slot<T, C>) {
        self.push(new_head, |next: usize| slot.set_next(next))
    }
}

impl<C> fmt::Debug for TransferStack<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TransferStack")
            .field(
                "head",
                &format_args!("{:#0x}", self.head.load(Ordering::Relaxed)),
            )
            .finish()
    }
}

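// The loom model below races two `push`es against a `pop_all` and asserts
// that the write performed in each `before` closure happens-before the pushed
// index becomes visible to the popping thread.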
#[cfg(all(loom, test))]
mod test {
    use super::*;
    use crate::{sync::UnsafeCell, test_util};
    use loom::thread;
    use std::sync::Arc;

    #[test]
    fn transfer_stack() {
        test_util::run_model("transfer_stack", || {
            let causalities = [UnsafeCell::new(999), UnsafeCell::new(999)];
            let shared = Arc::new((causalities, TransferStack::<cfg::DefaultConfig>::new()));
            let shared1 = shared.clone();
            let shared2 = shared.clone();

            let t1 = thread::spawn(move || {
                let (causalities, stack) = &*shared1;
                stack.push(0, |prev| {
                    causalities[0].with_mut(|c| unsafe {
                        *c = 0;
                    });
                    test_println!("prev={:#x}", prev)
                });
            });
            let t2 = thread::spawn(move || {
                let (causalities, stack) = &*shared2;
                stack.push(1, |prev| {
                    causalities[1].with_mut(|c| unsafe {
                        *c = 1;
                    });
                    test_println!("prev={:#x}", prev)
                });
            });

            let (causalities, stack) = &*shared;
            let mut idx = stack.pop_all();
            while idx.is_none() {
                idx = stack.pop_all();
                thread::yield_now();
            }
            let idx = idx.unwrap();
            causalities[idx].with(|val| unsafe {
                assert_eq!(
                    *val, idx,
                    "UnsafeCell write must happen-before index is pushed to the stack!"
                );
            });

            t1.join().unwrap();
            t2.join().unwrap();
        });
    }
}