use super::{RbBase, RbRead, RbRef, RbWrite};
use core::{cell::Cell, marker::PhantomData, mem::MaybeUninit, num::NonZeroUsize, ptr};

/// Caching read end of some ring buffer.
///
/// The space freed by removed items is not visible to the opposite write end until [`Self::commit`]/[`Self::sync`] is called or `Self` is dropped.
/// Items inserted by the opposite write end are not visible to `Self` until [`Self::sync`] is called.
///
/// Used to implement [`PostponedConsumer`](`crate::consumer::PostponedConsumer`).
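///
/// # Examples
///
/// A minimal sketch of the intended workflow, assuming this crate's `HeapRb`
/// buffer and a `Consumer::into_postponed` constructor for the postponed
/// (caching) consumer; exact constructor names may differ:
///
/// ```ignore
/// let rb = HeapRb::<i32>::new(4);
/// let (mut prod, cons) = rb.split();
/// prod.push(1).unwrap();
/// prod.push(2).unwrap();
///
/// // Pops are applied to the local cache only.
/// let mut cons = cons.into_postponed();
/// assert_eq!(cons.pop(), Some(1));
///
/// // Make the freed slot visible to the producer and fetch new items.
/// cons.sync();
/// ```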
pub struct RbReadCache<T, R: RbRef>
where
    R::Rb: RbRead<T>,
{
    target: R,
    head: Cell<usize>,
    tail: usize,
    _phantom: PhantomData<T>,
}

/// Caching write end of some ring buffer.
///
/// Inserted items are not visible to the opposite read end until [`Self::commit`]/[`Self::sync`] is called or `Self` is dropped.
/// The space freed by items removed by the opposite read end is not visible to `Self` until [`Self::sync`] is called.
///
/// Used to implement [`PostponedProducer`](`crate::producer::PostponedProducer`).
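///
/// # Examples
///
/// A minimal sketch of the intended workflow, assuming this crate's `HeapRb`
/// buffer and a `Producer::into_postponed` constructor for the postponed
/// (caching) producer; exact constructor names may differ:
///
/// ```ignore
/// let rb = HeapRb::<i32>::new(4);
/// let (prod, mut cons) = rb.split();
///
/// // Pushes land in the local cache only.
/// let mut prod = prod.into_postponed();
/// prod.push(1).unwrap();
/// assert_eq!(cons.pop(), None); // Not committed yet.
///
/// // Publish the pushed item to the consumer.
/// prod.sync();
/// assert_eq!(cons.pop(), Some(1));
/// ```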
pub struct RbWriteCache<T, R: RbRef>
where
    R::Rb: RbWrite<T>,
{
    target: R,
    head: usize,
    tail: Cell<usize>,
    _phantom: PhantomData<T>,
}

impl<T, R: RbRef> RbBase<T> for RbReadCache<T, R>
where
    R::Rb: RbRead<T>,
{
    #[inline]
    unsafe fn slices(
        &self,
        head: usize,
        tail: usize,
    ) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.target.slices(head, tail)
    }

    #[inline]
    fn capacity_nonzero(&self) -> NonZeroUsize {
        self.target.capacity_nonzero()
    }

    #[inline]
    fn head(&self) -> usize {
        // Serve the locally cached head; it is published to the ring buffer only on commit.
        self.head.get()
    }

    #[inline]
    fn tail(&self) -> usize {
        // A snapshot of the opposite end's tail, refreshed only by `sync`.
        self.tail
    }
}

impl<T, R: RbRef> RbBase<T> for RbWriteCache<T, R>
where
    R::Rb: RbWrite<T>,
{
    #[inline]
    unsafe fn slices(
        &self,
        head: usize,
        tail: usize,
    ) -> (&mut [MaybeUninit<T>], &mut [MaybeUninit<T>]) {
        self.target.slices(head, tail)
    }

    #[inline]
    fn capacity_nonzero(&self) -> NonZeroUsize {
        self.target.capacity_nonzero()
    }

    #[inline]
    fn head(&self) -> usize {
        // A snapshot of the opposite end's head, refreshed only by `sync`.
        self.head
    }

    #[inline]
    fn tail(&self) -> usize {
        // Serve the locally cached tail; it is published to the ring buffer only on commit.
        self.tail.get()
    }
}

impl<T, R: RbRef> RbRead<T> for RbReadCache<T, R>
where
    R::Rb: RbRead<T>,
{
    #[inline]
    unsafe fn set_head(&self, value: usize) {
        self.head.set(value);
    }
}

impl<T, R: RbRef> RbWrite<T> for RbWriteCache<T, R>
where
    R::Rb: RbWrite<T>,
{
    #[inline]
    unsafe fn set_tail(&self, value: usize) {
        self.tail.set(value);
    }
}

impl<T, R: RbRef> Drop for RbReadCache<T, R>
where
    R::Rb: RbRead<T>,
{
    fn drop(&mut self) {
        self.commit();
    }
}

impl<T, R: RbRef> Drop for RbWriteCache<T, R>
where
    R::Rb: RbWrite<T>,
{
    fn drop(&mut self) {
        self.commit();
    }
}

impl<T, R: RbRef> RbReadCache<T, R>
where
    R::Rb: RbRead<T>,
{
    /// Create new ring buffer cache.
    ///
    /// # Safety
    ///
    /// There must be only one instance containing the same ring buffer reference.
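    ///
    /// A hypothetical construction, assuming `rb_ref` is some shared ring
    /// buffer reference (e.g. an `Arc` to a heap ring buffer) with no other
    /// read end or read cache attached to it:
    ///
    /// ```ignore
    /// // Sound only while this is the sole read end of the buffer.
    /// let cache = unsafe { RbReadCache::new(rb_ref) };
    /// ```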
    pub unsafe fn new(rb_ref: R) -> Self {
        Self {
            head: Cell::new(rb_ref.head()),
            tail: rb_ref.tail(),
            target: rb_ref,
            _phantom: PhantomData,
        }
    }

    /// Commit changes to the ring buffer.
    pub fn commit(&mut self) {
        unsafe { self.target.set_head(self.head.get()) }
    }

    /// Commit changes and fetch updates from the ring buffer.
    pub fn sync(&mut self) {
        self.commit();
        self.tail = self.target.tail();
    }

    /// Commit and destroy `Self`, returning the underlying ring buffer reference.
    pub fn release(mut self) -> R {
        self.commit();
        // Wrap `self` in `MaybeUninit` so that its `Drop` (which would commit
        // again) never runs, then move `target` out with a raw read.
        let self_uninit = MaybeUninit::new(self);
        unsafe { ptr::read(&self_uninit.assume_init_ref().target) }
    }
}

impl<T, R: RbRef> RbWriteCache<T, R>
where
    R::Rb: RbWrite<T>,
{
    /// Create new ring buffer cache.
    ///
    /// # Safety
    ///
    /// There must be only one instance containing the same ring buffer reference.
    pub unsafe fn new(rb_ref: R) -> Self {
        Self {
            head: rb_ref.head(),
            tail: Cell::new(rb_ref.tail()),
            target: rb_ref,
            _phantom: PhantomData,
        }
    }

    /// Commit changes to the ring buffer.
    pub fn commit(&mut self) {
        unsafe { self.target.set_tail(self.tail.get()) }
    }

    /// Discard new items pushed since last sync.
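    ///
    /// A hypothetical usage via a postponed producer backed by this cache
    /// (constructor and setup omitted; names are illustrative):
    ///
    /// ```ignore
    /// prod.push(1).unwrap(); // Lands in the local cache only.
    /// prod.discard();        // The uncommitted item is dropped, never published.
    /// ```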
    pub fn discard(&mut self) {
        // Everything between the last committed tail and the cached tail was
        // pushed after the last commit; drop those items in place.
        let last_tail = self.target.tail();
        let (first, second) = unsafe { self.target.slices(last_tail, self.tail.get()) };
        for item_mut in first.iter_mut().chain(second.iter_mut()) {
            unsafe { item_mut.assume_init_drop() };
        }
        // Roll the cached tail back to the committed position.
        self.tail.set(last_tail);
    }

    /// Commit changes and fetch updates from the ring buffer.
    pub fn sync(&mut self) {
        self.commit();
        self.head = self.target.head();
    }

    /// Commit and destroy `Self`, returning the underlying ring buffer reference.
    pub fn release(mut self) -> R {
        self.commit();
        // Wrap `self` in `MaybeUninit` so that its `Drop` (which would commit
        // again) never runs, then move `target` out with a raw read.
        let self_uninit = MaybeUninit::new(self);
        unsafe { ptr::read(&self_uninit.assume_init_ref().target) }
    }
}