#![macro_use]

use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll};

use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;

use super::word::{Word, WordSize};
use super::{AnyChannel, Channel, Dir, Request, STATE};
use crate::interrupt::typelevel::Interrupt;
use crate::interrupt::Priority;
use crate::pac;
use crate::pac::gpdma::vals;

pub(crate) struct ChannelInfo {
    pub(crate) dma: pac::gpdma::Gpdma,
    pub(crate) num: usize,
    #[cfg(feature = "_dual-core")]
    pub(crate) irq: pac::Interrupt,
}

25/// GPDMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {}

impl Default for TransferOptions {
    fn default() -> Self {
        Self {}
    }
}

impl From<WordSize> for vals::Dw {
    fn from(raw: WordSize) -> Self {
        match raw {
            WordSize::OneByte => Self::BYTE,
            WordSize::TwoBytes => Self::HALF_WORD,
            WordSize::FourBytes => Self::WORD,
        }
    }
}

pub(crate) struct ChannelState {
    waker: AtomicWaker,
}

impl ChannelState {
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
    };
}
57/// safety: must be called only once
58pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
59 foreach_interrupt! {
60 ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
61 crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
62 #[cfg(not(feature = "_dual-core"))]
63 crate::interrupt::typelevel::$irq::enable();
64 };
65 }
66 crate::_generated::init_gpdma();
67}

impl AnyChannel {
    /// Safety: Must be called with a matching set of parameters for a valid dma channel
    pub(crate) unsafe fn on_irq(&self) {
        let info = self.info();
        #[cfg(feature = "_dual-core")]
        {
            use embassy_hal_internal::interrupt::InterruptExt as _;
            info.irq.enable();
        }

        let state = &STATE[self.id as usize];

        let ch = info.dma.ch(info.num);
        let sr = ch.sr().read();

        if sr.dtef() {
            panic!(
                "DMA: data transfer error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }
        if sr.usef() {
            panic!(
                "DMA: user settings error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }

        if sr.suspf() || sr.tcf() {
            // disable all xxIEs to prevent the irq from firing again.
            ch.cr().write(|_| {});

            // Wake the future. It'll look at tcf and see it's set.
            state.waker.wake();
        }
    }
}

/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: PeripheralRef<'a, AnyChannel>,
}

impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
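    ///
    /// A minimal usage sketch, inside an async context; `ch`, `rx_request` and
    /// `rdr_addr` are placeholders for a real DMA channel, request line and
    /// peripheral data-register address, not names from this crate:
    ///
    /// ```ignore
    /// let mut buf = [0u8; 64];
    /// let transfer = unsafe {
    ///     Transfer::new_read(ch, rx_request, rdr_addr, &mut buf, TransferOptions::default())
    /// };
    /// transfer.await;
    /// ```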
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    pub unsafe fn new_read_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: *mut [W],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut W as *mut u32,
            buf.len(),
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
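    ///
    /// A minimal usage sketch; `ch`, `tx_request` and `tdr_addr` are
    /// placeholders, not names from this crate:
    ///
    /// ```ignore
    /// let buf = [0u8; 64];
    /// let transfer = unsafe {
    ///     Transfer::new_write(ch, tx_request, &buf, tdr_addr, TransferOptions::default())
    /// };
    /// transfer.await;
    /// ```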
    pub unsafe fn new_write<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: &'a [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    pub unsafe fn new_write_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: *const [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const W as *mut u32,
            buf.len(),
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
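    ///
    /// The word referenced by `repeated` is sent `count` times; the memory-side
    /// address is not incremented. A sketch, with placeholder names:
    ///
    /// ```ignore
    /// let zero = 0u8;
    /// // Sends `zero` 128 times to the peripheral data register.
    /// let transfer = unsafe {
    ///     Transfer::new_write_repeated(ch, tx_request, &zero, 128, tdr_addr, TransferOptions::default())
    /// };
    /// transfer.await;
    /// ```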
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            false,
            W::size(),
            options,
        )
    }

    unsafe fn new_inner(
        channel: PeripheralRef<'a, AnyChannel>,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        _options: TransferOptions,
    ) -> Self {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let info = channel.info();
        let ch = info.dma.ch(info.num);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        let this = Self { channel };

        ch.cr().write(|w| w.set_reset(true));
        ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
        ch.llr().write(|_| {}); // no linked list
        ch.tr1().write(|w| {
            w.set_sdw(data_size.into());
            w.set_ddw(data_size.into());
            w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
            w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
        });
        ch.tr2().write(|w| {
            w.set_dreq(match dir {
                Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
                Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
            });
            w.set_reqsel(request);
        });
        ch.tr3().write(|_| {}); // no address offsets.
        ch.br1().write(|w| w.set_bndt(bndt));

        match dir {
            Dir::MemoryToPeripheral => {
                ch.sar().write_value(mem_addr as _);
                ch.dar().write_value(peri_addr as _);
            }
            Dir::PeripheralToMemory => {
                ch.sar().write_value(peri_addr as _);
                ch.dar().write_value(mem_addr as _);
            }
        }

        ch.cr().write(|w| {
            // Enable interrupts
            w.set_tcie(true);
            w.set_useie(true);
            w.set_dteie(true);
            w.set_suspie(true);

            // Start it
            w.set_en(true);
        });

        this
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer; you have to wait until
    /// [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);

        ch.cr().modify(|w| w.set_susp(true))
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);

        let sr = ch.sr().read();
        !sr.tcf() && !sr.suspf()
    }

    /// Gets the remaining transfer length for the channel.
    ///
    /// Note: this reads the `BNDT` field, which counts bytes rather than
    /// word-sized transfers, and will be zero for transfers that completed
    /// without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);

        ch.br1().read().bndt()
    }

    /// Blocking wait until the transfer finishes.
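    ///
    /// Unlike `.await`ing the transfer, this busy-polls the channel's status
    /// register and never yields. A sketch, with placeholder names:
    ///
    /// ```ignore
    /// let transfer = unsafe {
    ///     Transfer::new_write(ch, tx_request, &buf, tdr_addr, TransferOptions::default())
    /// };
    /// transfer.blocking_wait(); // returns once the hardware reports completion
    /// ```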
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        core::mem::forget(self);
    }
}

impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state: &ChannelState = &STATE[self.channel.id as usize];
        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}