#![cfg_attr(gpdma, allow(unused))]

use core::future::poll_fn;
use core::task::{Poll, Waker};

use crate::dma::word::Word;

pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the DMA writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Reset the transfer-complete counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running `poll_fn`.
    fn set_waker(&mut self, waker: &Waker);
}
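
// A minimal sketch of a `DmaCtrl` implementation, kept behind `cfg(test)`.
// It models the hardware with plain counters and is illustrative only:
// `MockDmaCtrl` and its fields are assumptions, not part of this module's
// API. A real driver would read NDTR from the DMA channel and count
// transfer-complete interrupts.
#[cfg(test)]
struct MockDmaCtrl {
    /// Transfers left until the DMA wraps (mirrors NDTR).
    remaining: usize,
    /// Transfer-complete events observed since the last reset.
    complete_count: usize,
}

#[cfg(test)]
impl DmaCtrl for MockDmaCtrl {
    fn get_remaining_transfers(&self) -> usize {
        self.remaining
    }

    fn reset_complete_count(&mut self) -> usize {
        core::mem::replace(&mut self.complete_count, 0)
    }

    fn set_waker(&mut self, _waker: &Waker) {
        // A real driver would store the waker and wake it from the DMA
        // half-transfer and transfer-complete interrupts.
    }
}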

#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA controller overtook the software index and data was lost.
    Overrun,
    /// The newly read DMA positions don't make sense compared to the previous
    /// ones. This can usually only occur due to a faulty driver
    /// implementation, for instance if the driver author (or a user working
    /// with raw metapac code) directly resets the channel.
    DmaUnsynced,
}

#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct DmaIndex {
    complete_count: usize,
    pos: usize,
}

impl DmaIndex {
    fn reset(&mut self) {
        self.pos = 0;
        self.complete_count = 0;
    }

    fn as_index(&self, cap: usize, offset: usize) -> usize {
        (self.pos + offset) % cap
    }

    fn dma_sync(&mut self, cap: usize, dma: &mut impl DmaCtrl) {
        // Important: the ordering of the next two reads matters!
        // If reversed, the code would detect a spurious +capacity
        // jump at wrap-around.
        let count_diff = dma.reset_complete_count();
        let pos = cap - dma.get_remaining_transfers();
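
        // If the position moved backwards while no transfer-complete event
        // was counted, the DMA wrapped around between the two reads above (or
        // the wrap has not been counted yet). Clamp to the last position
        // before the wrap; the missed complete event is picked up on the
        // next sync.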
        self.pos = if pos < self.pos && count_diff == 0 {
            cap - 1
        } else {
            pos
        };

        self.complete_count += count_diff;
    }

    fn advance(&mut self, cap: usize, steps: usize) {
        let next = self.pos + steps;
        self.complete_count += next / cap;
        self.pos = next % cap;
    }

    fn normalize(lhs: &mut DmaIndex, rhs: &mut DmaIndex) {
        let min_count = lhs.complete_count.min(rhs.complete_count);
        lhs.complete_count -= min_count;
        rhs.complete_count -= min_count;
    }

    fn diff(&self, cap: usize, rhs: &DmaIndex) -> isize {
        (self.complete_count * cap + self.pos) as isize - (rhs.complete_count * cap + rhs.pos) as isize
    }
}
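
// Worked example of the `DmaIndex` arithmetic above, for illustration only:
// with `cap = 4`, an index at `pos = 3, complete_count = 0` advanced by 2
// steps becomes `pos = 1, complete_count = 1`, i.e. the absolute position
// 1 * 4 + 1 = 5. Its `diff` against an untouched index at `pos = 0,
// complete_count = 0` is then 5, one more than `cap`, which the readable
// buffer's `len` below reports as an overrun.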

pub struct ReadableDmaRingBuffer<'a, W: Word> {
    dma_buf: &'a mut [W],
    write_index: DmaIndex,
    read_index: DmaIndex,
}

impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    /// Construct an empty buffer.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self {
            dma_buf,
            write_index: Default::default(),
            read_index: Default::default(),
        }
    }

    /// Reset the ring buffer to its initial state.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.write_index.reset();
        self.write_index.dma_sync(self.cap(), dma);
        self.read_index = self.write_index;
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Get the number of DMA samples available for immediate reading.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.write_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.write_index, &mut self.read_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            Err(Error::DmaUnsynced)
        } else if diff > self.cap() as isize {
            Err(Error::Overrun)
        } else {
            Ok(diff as usize)
        }
    }

    /// Read elements from the ring buffer.
    ///
    /// Returns a tuple of the number of elements read and the number of
    /// elements still available for immediate reading afterwards. If `buf`
    /// was too small to hold everything, the rest stays in the ring buffer.
    /// An error is returned if the portion to be read was overwritten by the
    /// DMA controller, in which case the ring buffer automatically resets
    /// itself.
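    ///
    /// A hedged usage sketch; `ring_buf` and `dma` are assumed to be set up
    /// by the caller:
    ///
    /// ```ignore
    /// let mut buf = [0u8; 16];
    /// let (read, still_available) = ring_buf.read(&mut dma, &mut buf)?;
    /// ```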
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        self.read_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Read an exact number of elements from the ring buffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// An error is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/wake behavior:
    /// The underlying DMA peripheral can only wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', where this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M is a multiple of N/2, this function returns after every N/2 elements are read from the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
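    ///
    /// A hedged usage sketch; `ring_buf` and `dma` are assumed to be set up
    /// by the caller:
    ///
    /// ```ignore
    /// let mut chunk = [0u8; 32];
    /// // Completes once all 32 elements have arrived from the DMA stream.
    /// let still_available = ring_buf.read_exact(&mut dma, &mut chunk).await?;
    /// ```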
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, Error> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    fn read_raw(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        let readable = self.len(dma)?.min(buf.len());
        for i in 0..readable {
            buf[i] = self.read_buf(i);
        }
        let available = self.len(dma)?;
        self.read_index.advance(self.cap(), readable);
        Ok((readable, available - readable))
    }

    fn read_buf(&self, offset: usize) -> W {
        unsafe {
            core::ptr::read_volatile(
                self.dma_buf
                    .as_ptr()
                    .add(self.read_index.as_index(self.cap(), offset)),
            )
        }
    }
}

pub struct WritableDmaRingBuffer<'a, W: Word> {
    dma_buf: &'a mut [W],
    read_index: DmaIndex,
    write_index: DmaIndex,
}

impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    /// Construct a ringbuffer filled with the given buffer data.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        let len = dma_buf.len();
        Self {
            dma_buf,
            read_index: Default::default(),
            write_index: DmaIndex {
                complete_count: 0,
                pos: len,
            },
        }
    }

    /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.read_index.reset();
        self.read_index.dma_sync(self.cap(), dma);
        self.write_index = self.read_index;
        self.write_index.advance(self.cap(), self.cap());
    }

    /// Get the remaining writable DMA samples.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.read_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.read_index, &mut self.write_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            Err(Error::Overrun)
        } else if diff > self.cap() as isize {
            Err(Error::DmaUnsynced)
        } else {
            Ok(self.cap().saturating_sub(diff as usize))
        }
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Append data to the ring buffer.
    ///
    /// Returns a tuple of the number of elements written and the remaining
    /// write capacity in the buffer. An error is returned if the portion to
    /// be written was already read out by the DMA controller. In this case,
    /// the ring buffer automatically resets itself, giving a full buffer
    /// worth of leeway between the write index and the DMA.
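    ///
    /// A hedged usage sketch; `ring_buf`, `dma`, and `samples` are assumed to
    /// be set up by the caller:
    ///
    /// ```ignore
    /// let (written, free) = ring_buf.write(&mut dma, &samples)?;
    /// ```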
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        self.write_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Write elements directly to the buffer.
    ///
    /// Subsequent writes overwrite the content of the buffer, so it is not useful to call this more than once.
    /// Data is aligned towards the end of the buffer.
    ///
    /// On success, returns the written length and the empty space in front of the written block.
    /// Fails if the data to write exceeds the buffer capacity.
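    ///
    /// A hedged usage sketch, assuming the buffer should be pre-filled before
    /// the DMA transfer is started; `ring_buf` and `samples` are set up by
    /// the caller:
    ///
    /// ```ignore
    /// // Place 16 elements at the very end of the buffer.
    /// let (written, free) = ring_buf.write_immediate(&samples[..16])?;
    /// ```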
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        if buf.len() > self.cap() {
            return Err(Error::Overrun);
        }

        let start = self.cap() - buf.len();
        for (i, data) in buf.iter().enumerate() {
            self.write_buf(start + i, *data)
        }
        // `buf.len() <= cap` was checked above, so the written length is simply `buf.len()`.
        let written = buf.len();
        Ok((written, self.cap() - written))
    }

    /// Wait for any ring buffer write error.
    pub async fn wait_write_error(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.len(dma) {
                Ok(_) => Poll::Pending,
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    /// Write an exact number of elements to the ringbuffer.
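    ///
    /// Returns the remaining write capacity once the full `buffer` has been
    /// queued. Progress is driven by the waker registered via
    /// [`DmaCtrl::set_waker`], so, as with [`ReadableDmaRingBuffer::read_exact`],
    /// completion may require up to half a buffer of extra transfers.
    ///
    /// A hedged usage sketch; `ring_buf`, `dma`, and `samples` are assumed to
    /// be set up by the caller:
    ///
    /// ```ignore
    /// let free = ring_buf.write_exact(&mut dma, &samples).await?;
    /// ```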
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    fn write_raw(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        let writable = self.len(dma)?.min(buf.len());
        for i in 0..writable {
            self.write_buf(i, buf[i]);
        }
        let available = self.len(dma)?;
        self.write_index.advance(self.cap(), writable);
        Ok((writable, available - writable))
    }

    fn write_buf(&mut self, offset: usize, value: W) {
        unsafe {
            core::ptr::write_volatile(
                self.dma_buf
                    .as_mut_ptr()
                    .add(self.write_index.as_index(self.cap(), offset)),
                value,
            )
        }
    }
}

#[cfg(test)]
mod tests;