1#![allow(clippy::missing_safety_doc)]
2#![allow(clippy::identity_op)]
3#![allow(clippy::unnecessary_cast)]
4#![allow(clippy::erasing_op)]
5
6#[derive(Copy, Clone, Eq, PartialEq)]
7pub struct Channel {
8 ptr: *mut u8,
9}
10unsafe impl Send for Channel {}
11unsafe impl Sync for Channel {}
12impl Channel {
13 #[inline(always)]
14 pub const unsafe fn from_ptr(ptr: *mut ()) -> Self {
15 Self { ptr: ptr as _ }
16 }
17 #[inline(always)]
18 pub const fn as_ptr(&self) -> *mut () {
19 self.ptr as _
20 }
21 #[doc = "LPDMA channel 15 linked-list base address register"]
22 #[inline(always)]
23 pub const fn lbar(self) -> crate::common::Reg<regs::ChLbar, crate::common::RW> {
24 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0usize) as _) }
25 }
26 #[doc = "LPDMA channel 15 flag clear register"]
27 #[inline(always)]
28 pub const fn fcr(self) -> crate::common::Reg<regs::ChFcr, crate::common::RW> {
29 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0cusize) as _) }
30 }
31 #[doc = "LPDMA channel 15 status register"]
32 #[inline(always)]
33 pub const fn sr(self) -> crate::common::Reg<regs::ChSr, crate::common::RW> {
34 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x10usize) as _) }
35 }
36 #[doc = "LPDMA channel 15 control register"]
37 #[inline(always)]
38 pub const fn cr(self) -> crate::common::Reg<regs::ChCr, crate::common::RW> {
39 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x14usize) as _) }
40 }
41 #[doc = "LPDMA channel 15 transfer register 1"]
42 #[inline(always)]
43 pub const fn tr1(self) -> crate::common::Reg<regs::ChTr1, crate::common::RW> {
44 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x40usize) as _) }
45 }
46 #[doc = "LPDMA channel 15 transfer register 2"]
47 #[inline(always)]
48 pub const fn tr2(self) -> crate::common::Reg<regs::ChTr2, crate::common::RW> {
49 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x44usize) as _) }
50 }
51 #[doc = "LPDMA channel 15 alternate block register 1"]
52 #[inline(always)]
53 pub const fn br1(self) -> crate::common::Reg<regs::ChBr1, crate::common::RW> {
54 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x48usize) as _) }
55 }
56 #[doc = "LPDMA channel 15 source address register"]
57 #[inline(always)]
58 pub const fn sar(self) -> crate::common::Reg<u32, crate::common::RW> {
59 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x4cusize) as _) }
60 }
61 #[doc = "LPDMA channel 15 destination address register"]
62 #[inline(always)]
63 pub const fn dar(self) -> crate::common::Reg<u32, crate::common::RW> {
64 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x50usize) as _) }
65 }
66 #[doc = "LPDMA channel 15 transfer register 3"]
67 #[inline(always)]
68 pub const fn tr3(self) -> crate::common::Reg<regs::ChTr3, crate::common::RW> {
69 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x54usize) as _) }
70 }
71 #[doc = "LPDMA channel 15 block register 2"]
72 #[inline(always)]
73 pub const fn br2(self) -> crate::common::Reg<regs::ChBr2, crate::common::RW> {
74 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x58usize) as _) }
75 }
76 #[doc = "LPDMA channel 15 alternate linked-list address register"]
77 #[inline(always)]
78 pub const fn llr(self) -> crate::common::Reg<regs::ChLlr, crate::common::RW> {
79 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x7cusize) as _) }
80 }
81}
82#[doc = "LPDMA"]
83#[derive(Copy, Clone, Eq, PartialEq)]
84pub struct Lpdma {
85 ptr: *mut u8,
86}
87unsafe impl Send for Lpdma {}
88unsafe impl Sync for Lpdma {}
89impl Lpdma {
90 #[inline(always)]
91 pub const unsafe fn from_ptr(ptr: *mut ()) -> Self {
92 Self { ptr: ptr as _ }
93 }
94 #[inline(always)]
95 pub const fn as_ptr(&self) -> *mut () {
96 self.ptr as _
97 }
98 #[doc = "LPDMA secure configuration register"]
99 #[inline(always)]
100 pub const fn seccfgr(self) -> crate::common::Reg<regs::Seccfgr, crate::common::RW> {
101 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0usize) as _) }
102 }
103 #[doc = "LPDMA privileged configuration register"]
104 #[inline(always)]
105 pub const fn privcfgr(self) -> crate::common::Reg<regs::Privcfgr, crate::common::RW> {
106 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x04usize) as _) }
107 }
108 #[doc = "LPDMA configuration lock register"]
109 #[inline(always)]
110 pub const fn rcfglockr(self) -> crate::common::Reg<regs::Rcfglockr, crate::common::RW> {
111 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x08usize) as _) }
112 }
113 #[doc = "LPDMA non-secure masked interrupt status register"]
114 #[inline(always)]
115 pub const fn misr(self) -> crate::common::Reg<regs::Misr, crate::common::RW> {
116 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0cusize) as _) }
117 }
118 #[doc = "LPDMA secure masked interrupt status register"]
119 #[inline(always)]
120 pub const fn smisr(self) -> crate::common::Reg<regs::Misr, crate::common::RW> {
121 unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x10usize) as _) }
122 }
123 #[inline(always)]
124 pub const fn ch(self, n: usize) -> Channel {
125 assert!(n < 4usize);
126 unsafe { Channel::from_ptr(self.ptr.add(0x50usize + n * 128usize) as _) }
127 }
128}
129pub mod regs {
130 #[doc = "LPDMA channel 15 alternate block register 1"]
131 #[repr(transparent)]
132 #[derive(Copy, Clone, Eq, PartialEq)]
133 pub struct ChBr1(pub u32);
134 impl ChBr1 {
135 #[doc = "block number of data bytes to transfer from the source. Block size transferred from the source. When the channel is enabled, this field becomes read-only and is decremented, indicating the remaining number of data items in the current source block to be transferred. BNDT\\[15:0\\]
136is programmed in number of bytes, maximum source block size is 64 Kbytes -1. Once the last data transfer is completed (BNDT\\[15:0\\]
137= 0): - if CH\\[x\\].LLR.UB1 = 1, this field is updated by the LLI in the memory. - if CH\\[x\\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. - if all CH\\[x\\].LLR.Uxx = 0 and if CH\\[x\\].LLR.LA\\[15:0\\]
138≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). - if CH\\[x\\].LLR = 0, this field is kept as zero following the last LLI data transfer. Note: A non-null source block size must be a multiple of the source data width (BNDT\\[2:0\\]
139versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and no transfer is issued. When configured in packing mode (CH\\[x\\].TR1.PAM\\[1\\]=1 and destination data width different from source data width), a non-null source block size must be a multiple of the destination data width (BNDT\\[2:0\\]
140versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
141 #[inline(always)]
142 pub const fn bndt(&self) -> u16 {
143 let val = (self.0 >> 0usize) & 0xffff;
144 val as u16
145 }
146 #[doc = "block number of data bytes to transfer from the source. Block size transferred from the source. When the channel is enabled, this field becomes read-only and is decremented, indicating the remaining number of data items in the current source block to be transferred. BNDT\\[15:0\\]
147is programmed in number of bytes, maximum source block size is 64 Kbytes -1. Once the last data transfer is completed (BNDT\\[15:0\\]
148= 0): - if CH\\[x\\].LLR.UB1 = 1, this field is updated by the LLI in the memory. - if CH\\[x\\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. - if all CH\\[x\\].LLR.Uxx = 0 and if CH\\[x\\].LLR.LA\\[15:0\\]
149≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). - if CH\\[x\\].LLR = 0, this field is kept as zero following the last LLI data transfer. Note: A non-null source block size must be a multiple of the source data width (BNDT\\[2:0\\]
150versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and no transfer is issued. When configured in packing mode (CH\\[x\\].TR1.PAM\\[1\\]=1 and destination data width different from source data width), a non-null source block size must be a multiple of the destination data width (BNDT\\[2:0\\]
151versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
152 #[inline(always)]
153 pub fn set_bndt(&mut self, val: u16) {
154 self.0 = (self.0 & !(0xffff << 0usize)) | (((val as u32) & 0xffff) << 0usize);
155 }
156 #[doc = "Block repeat counter. This field contains the number of repetitions of the current block (0 to 2047). When the channel is enabled, this field becomes read-only. After decrements, this field indicates the remaining number of blocks, excluding the current one. This counter is hardware decremented for each completed block transfer. Once the last block transfer is completed (BRC\\[10:0\\]
157= BNDT\\[15:0\\]
158= 0): If CH\\[x\\].LLR.UB1 = 1, all CH\\[x\\].BR1 fields are updated by the next LLI in the memory. If CH\\[x\\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. if all CH\\[x\\].LLR.Uxx = 0 and if CH\\[x\\].LLR.LA\\[15:0\\]
159≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). if CH\\[x\\].LLR = 0, this field is kept as zero following the last LLI and data transfer."]
160 #[inline(always)]
161 pub const fn brc(&self) -> u16 {
162 let val = (self.0 >> 16usize) & 0x07ff;
163 val as u16
164 }
165 #[doc = "Block repeat counter. This field contains the number of repetitions of the current block (0 to 2047). When the channel is enabled, this field becomes read-only. After decrements, this field indicates the remaining number of blocks, excluding the current one. This counter is hardware decremented for each completed block transfer. Once the last block transfer is completed (BRC\\[10:0\\]
166= BNDT\\[15:0\\]
167= 0): If CH\\[x\\].LLR.UB1 = 1, all CH\\[x\\].BR1 fields are updated by the next LLI in the memory. If CH\\[x\\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. if all CH\\[x\\].LLR.Uxx = 0 and if CH\\[x\\].LLR.LA\\[15:0\\]
168≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). if CH\\[x\\].LLR = 0, this field is kept as zero following the last LLI and data transfer."]
169 #[inline(always)]
170 pub fn set_brc(&mut self, val: u16) {
171 self.0 = (self.0 & !(0x07ff << 16usize)) | (((val as u32) & 0x07ff) << 16usize);
172 }
173 #[doc = "source address decrement"]
174 #[inline(always)]
175 pub const fn sdec(&self) -> super::vals::Dec {
176 let val = (self.0 >> 28usize) & 0x01;
177 super::vals::Dec::from_bits(val as u8)
178 }
179 #[doc = "source address decrement"]
180 #[inline(always)]
181 pub fn set_sdec(&mut self, val: super::vals::Dec) {
182 self.0 = (self.0 & !(0x01 << 28usize)) | (((val.to_bits() as u32) & 0x01) << 28usize);
183 }
184 #[doc = "destination address decrement"]
185 #[inline(always)]
186 pub const fn ddec(&self) -> super::vals::Dec {
187 let val = (self.0 >> 29usize) & 0x01;
188 super::vals::Dec::from_bits(val as u8)
189 }
190 #[doc = "destination address decrement"]
191 #[inline(always)]
192 pub fn set_ddec(&mut self, val: super::vals::Dec) {
193 self.0 = (self.0 & !(0x01 << 29usize)) | (((val.to_bits() as u32) & 0x01) << 29usize);
194 }
195 #[doc = "Block repeat source address decrement. Note: On top of this increment/decrement (depending on BRSDEC), CH\\[x\\].SAR is in the same time also updated by the increment/decrement (depending on SDEC) of the CH\\[x\\].TR3.SAO value, as it is done after any programmed burst transfer."]
196 #[inline(always)]
197 pub const fn brsdec(&self) -> super::vals::Dec {
198 let val = (self.0 >> 30usize) & 0x01;
199 super::vals::Dec::from_bits(val as u8)
200 }
201 #[doc = "Block repeat source address decrement. Note: On top of this increment/decrement (depending on BRSDEC), CH\\[x\\].SAR is in the same time also updated by the increment/decrement (depending on SDEC) of the CH\\[x\\].TR3.SAO value, as it is done after any programmed burst transfer."]
202 #[inline(always)]
203 pub fn set_brsdec(&mut self, val: super::vals::Dec) {
204 self.0 = (self.0 & !(0x01 << 30usize)) | (((val.to_bits() as u32) & 0x01) << 30usize);
205 }
206 #[doc = "Block repeat destination address decrement. Note: On top of this increment/decrement (depending on BRDDEC), CH\\[x\\].DAR is in the same time also updated by the increment/decrement (depending on DDEC) of the CH\\[x\\].TR3.DAO value, as it is usually done at the end of each programmed burst transfer."]
207 #[inline(always)]
208 pub const fn brddec(&self) -> super::vals::Dec {
209 let val = (self.0 >> 31usize) & 0x01;
210 super::vals::Dec::from_bits(val as u8)
211 }
212 #[doc = "Block repeat destination address decrement. Note: On top of this increment/decrement (depending on BRDDEC), CH\\[x\\].DAR is in the same time also updated by the increment/decrement (depending on DDEC) of the CH\\[x\\].TR3.DAO value, as it is usually done at the end of each programmed burst transfer."]
213 #[inline(always)]
214 pub fn set_brddec(&mut self, val: super::vals::Dec) {
215 self.0 = (self.0 & !(0x01 << 31usize)) | (((val.to_bits() as u32) & 0x01) << 31usize);
216 }
217 }
218 impl Default for ChBr1 {
219 #[inline(always)]
220 fn default() -> ChBr1 {
221 ChBr1(0)
222 }
223 }
224 impl core::fmt::Debug for ChBr1 {
225 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
226 f.debug_struct("ChBr1")
227 .field("bndt", &self.bndt())
228 .field("brc", &self.brc())
229 .field("sdec", &self.sdec())
230 .field("ddec", &self.ddec())
231 .field("brsdec", &self.brsdec())
232 .field("brddec", &self.brddec())
233 .finish()
234 }
235 }
236 #[cfg(feature = "defmt")]
237 impl defmt::Format for ChBr1 {
238 fn format(&self, f: defmt::Formatter) {
239 #[derive(defmt :: Format)]
240 struct ChBr1 {
241 bndt: u16,
242 brc: u16,
243 sdec: super::vals::Dec,
244 ddec: super::vals::Dec,
245 brsdec: super::vals::Dec,
246 brddec: super::vals::Dec,
247 }
248 let proxy = ChBr1 {
249 bndt: self.bndt(),
250 brc: self.brc(),
251 sdec: self.sdec(),
252 ddec: self.ddec(),
253 brsdec: self.brsdec(),
254 brddec: self.brddec(),
255 };
256 defmt::write!(f, "{}", proxy)
257 }
258 }
259 #[doc = "LPDMA channel 12 block register 2"]
260 #[repr(transparent)]
261 #[derive(Copy, Clone, Eq, PartialEq)]
262 pub struct ChBr2(pub u32);
263 impl ChBr2 {
264 #[doc = "Block repeated source address offset. For a channel with 2D addressing capability, this field is used to update (by addition or subtraction depending on CH\\[x\\].BR1.BRSDEC) the current source address (CH\\[x\\].SAR) at the end of a block transfer. Note: A block repeated source address offset must be aligned with the programmed data width of a source burst (BRSAO\\[2:0\\]
265versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
266 #[inline(always)]
267 pub const fn brsao(&self) -> u16 {
268 let val = (self.0 >> 0usize) & 0xffff;
269 val as u16
270 }
271 #[doc = "Block repeated source address offset. For a channel with 2D addressing capability, this field is used to update (by addition or subtraction depending on CH\\[x\\].BR1.BRSDEC) the current source address (CH\\[x\\].SAR) at the end of a block transfer. Note: A block repeated source address offset must be aligned with the programmed data width of a source burst (BRSAO\\[2:0\\]
272versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
273 #[inline(always)]
274 pub fn set_brsao(&mut self, val: u16) {
275 self.0 = (self.0 & !(0xffff << 0usize)) | (((val as u32) & 0xffff) << 0usize);
276 }
277 #[doc = "Block repeated destination address offset. For a channel with 2D addressing capability, this field is used to update (by addition or subtraction depending on CH\\[x\\].BR1.BRDDEC) the current destination address (CH\\[x\\].DAR) at the end of a block transfer. Note: A block repeated destination address offset must be aligned with the programmed data width of a destination burst (BRDAO\\[2:0\\]
278versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
279 #[inline(always)]
280 pub const fn brdao(&self) -> u16 {
281 let val = (self.0 >> 16usize) & 0xffff;
282 val as u16
283 }
284 #[doc = "Block repeated destination address offset. For a channel with 2D addressing capability, this field is used to update (by addition or subtraction depending on CH\\[x\\].BR1.BRDDEC) the current destination address (CH\\[x\\].DAR) at the end of a block transfer. Note: A block repeated destination address offset must be aligned with the programmed data width of a destination burst (BRDAO\\[2:0\\]
285versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else a user setting error is reported and no transfer is issued."]
286 #[inline(always)]
287 pub fn set_brdao(&mut self, val: u16) {
288 self.0 = (self.0 & !(0xffff << 16usize)) | (((val as u32) & 0xffff) << 16usize);
289 }
290 }
291 impl Default for ChBr2 {
292 #[inline(always)]
293 fn default() -> ChBr2 {
294 ChBr2(0)
295 }
296 }
297 impl core::fmt::Debug for ChBr2 {
298 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
299 f.debug_struct("ChBr2")
300 .field("brsao", &self.brsao())
301 .field("brdao", &self.brdao())
302 .finish()
303 }
304 }
305 #[cfg(feature = "defmt")]
306 impl defmt::Format for ChBr2 {
307 fn format(&self, f: defmt::Formatter) {
308 #[derive(defmt :: Format)]
309 struct ChBr2 {
310 brsao: u16,
311 brdao: u16,
312 }
313 let proxy = ChBr2 {
314 brsao: self.brsao(),
315 brdao: self.brdao(),
316 };
317 defmt::write!(f, "{}", proxy)
318 }
319 }
320 #[doc = "LPDMA channel 11 control register"]
321 #[repr(transparent)]
322 #[derive(Copy, Clone, Eq, PartialEq)]
323 pub struct ChCr(pub u32);
324 impl ChCr {
325 #[doc = "enable. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 0. Else: this bit is de-asserted by hardware when there is a transfer error (master bus error or user setting error) or when there is a channel transfer complete (channel ready to be configured, e.g. if LSM=1 at the end of a single execution of the LLI). Else, this bit can be asserted by software. Writing 0 into this EN bit is ignored."]
326 #[inline(always)]
327 pub const fn en(&self) -> bool {
328 let val = (self.0 >> 0usize) & 0x01;
329 val != 0
330 }
331 #[doc = "enable. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 0. Else: this bit is de-asserted by hardware when there is a transfer error (master bus error or user setting error) or when there is a channel transfer complete (channel ready to be configured, e.g. if LSM=1 at the end of a single execution of the LLI). Else, this bit can be asserted by software. Writing 0 into this EN bit is ignored."]
332 #[inline(always)]
333 pub fn set_en(&mut self, val: bool) {
334 self.0 = (self.0 & !(0x01 << 0usize)) | (((val as u32) & 0x01) << 0usize);
335 }
336 #[doc = "reset. This bit is write only. Writing 0 has no impact. Writing 1 implies the reset of the following: the FIFO, the channel internal state, SUSP and EN bits (whatever is written receptively in bit 2 and bit 0). The reset is effective when the channel is in steady state, meaning one of the following: - active channel in suspended state (CH\\[x\\].SR.SUSPF = 1 and CH\\[x\\].SR.IDLEF = CH\\[x\\].CR.EN = 1). - channel in disabled state (CH\\[x\\].SR.IDLEF = 1 and CH\\[x\\].CR.EN = 0). After writing a RESET, to continue using this channel, the user must explicitly reconfigure the channel including the hardware-modified configuration registers (CH\\[x\\].BR1, CH\\[x\\].SAR and CH\\[x\\].DAR) before enabling again the channel (see the programming sequence in )."]
337 #[inline(always)]
338 pub const fn reset(&self) -> bool {
339 let val = (self.0 >> 1usize) & 0x01;
340 val != 0
341 }
342 #[doc = "reset. This bit is write only. Writing 0 has no impact. Writing 1 implies the reset of the following: the FIFO, the channel internal state, SUSP and EN bits (whatever is written receptively in bit 2 and bit 0). The reset is effective when the channel is in steady state, meaning one of the following: - active channel in suspended state (CH\\[x\\].SR.SUSPF = 1 and CH\\[x\\].SR.IDLEF = CH\\[x\\].CR.EN = 1). - channel in disabled state (CH\\[x\\].SR.IDLEF = 1 and CH\\[x\\].CR.EN = 0). After writing a RESET, to continue using this channel, the user must explicitly reconfigure the channel including the hardware-modified configuration registers (CH\\[x\\].BR1, CH\\[x\\].SAR and CH\\[x\\].DAR) before enabling again the channel (see the programming sequence in )."]
343 #[inline(always)]
344 pub fn set_reset(&mut self, val: bool) {
345 self.0 = (self.0 & !(0x01 << 1usize)) | (((val as u32) & 0x01) << 1usize);
346 }
347 #[doc = "suspend. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 2. Else: Software must write 1 in order to suspend an active channel i.e. a channel with an on-going LPDMA transfer over its master ports. The software must write 0 in order to resume a suspended channel, following the programming sequence detailed in ."]
348 #[inline(always)]
349 pub const fn susp(&self) -> bool {
350 let val = (self.0 >> 2usize) & 0x01;
351 val != 0
352 }
353 #[doc = "suspend. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 2. Else: Software must write 1 in order to suspend an active channel i.e. a channel with an on-going LPDMA transfer over its master ports. The software must write 0 in order to resume a suspended channel, following the programming sequence detailed in ."]
354 #[inline(always)]
355 pub fn set_susp(&mut self, val: bool) {
356 self.0 = (self.0 & !(0x01 << 2usize)) | (((val as u32) & 0x01) << 2usize);
357 }
358 #[doc = "transfer complete interrupt enable"]
359 #[inline(always)]
360 pub const fn tcie(&self) -> bool {
361 let val = (self.0 >> 8usize) & 0x01;
362 val != 0
363 }
364 #[doc = "transfer complete interrupt enable"]
365 #[inline(always)]
366 pub fn set_tcie(&mut self, val: bool) {
367 self.0 = (self.0 & !(0x01 << 8usize)) | (((val as u32) & 0x01) << 8usize);
368 }
369 #[doc = "half transfer complete interrupt enable"]
370 #[inline(always)]
371 pub const fn htie(&self) -> bool {
372 let val = (self.0 >> 9usize) & 0x01;
373 val != 0
374 }
375 #[doc = "half transfer complete interrupt enable"]
376 #[inline(always)]
377 pub fn set_htie(&mut self, val: bool) {
378 self.0 = (self.0 & !(0x01 << 9usize)) | (((val as u32) & 0x01) << 9usize);
379 }
380 #[doc = "data transfer error interrupt enable"]
381 #[inline(always)]
382 pub const fn dteie(&self) -> bool {
383 let val = (self.0 >> 10usize) & 0x01;
384 val != 0
385 }
386 #[doc = "data transfer error interrupt enable"]
387 #[inline(always)]
388 pub fn set_dteie(&mut self, val: bool) {
389 self.0 = (self.0 & !(0x01 << 10usize)) | (((val as u32) & 0x01) << 10usize);
390 }
391 #[doc = "update link transfer error interrupt enable"]
392 #[inline(always)]
393 pub const fn uleie(&self) -> bool {
394 let val = (self.0 >> 11usize) & 0x01;
395 val != 0
396 }
397 #[doc = "update link transfer error interrupt enable"]
398 #[inline(always)]
399 pub fn set_uleie(&mut self, val: bool) {
400 self.0 = (self.0 & !(0x01 << 11usize)) | (((val as u32) & 0x01) << 11usize);
401 }
402 #[doc = "user setting error interrupt enable"]
403 #[inline(always)]
404 pub const fn useie(&self) -> bool {
405 let val = (self.0 >> 12usize) & 0x01;
406 val != 0
407 }
408 #[doc = "user setting error interrupt enable"]
409 #[inline(always)]
410 pub fn set_useie(&mut self, val: bool) {
411 self.0 = (self.0 & !(0x01 << 12usize)) | (((val as u32) & 0x01) << 12usize);
412 }
413 #[doc = "completed suspension interrupt enable"]
414 #[inline(always)]
415 pub const fn suspie(&self) -> bool {
416 let val = (self.0 >> 13usize) & 0x01;
417 val != 0
418 }
419 #[doc = "completed suspension interrupt enable"]
420 #[inline(always)]
421 pub fn set_suspie(&mut self, val: bool) {
422 self.0 = (self.0 & !(0x01 << 13usize)) | (((val as u32) & 0x01) << 13usize);
423 }
424 #[doc = "trigger overrun interrupt enable"]
425 #[inline(always)]
426 pub const fn toie(&self) -> bool {
427 let val = (self.0 >> 14usize) & 0x01;
428 val != 0
429 }
430 #[doc = "trigger overrun interrupt enable"]
431 #[inline(always)]
432 pub fn set_toie(&mut self, val: bool) {
433 self.0 = (self.0 & !(0x01 << 14usize)) | (((val as u32) & 0x01) << 14usize);
434 }
435 #[doc = "Link step mode. First the (possible 1D/repeated) block transfer is executed as defined by the current internal register file until CH\\[x\\].BR1.BNDT\\[15:0\\]
436= 0 and CH\\[x\\].BR1.BRC\\[10:0\\]
437= 0 if present. Secondly the next linked-list data structure is conditionally uploaded from memory as defined by CH\\[x\\].LLR. Then channel execution is completed. Note: This bit must be written when EN=0. This bit is read-only when EN=1."]
438 #[inline(always)]
439 pub const fn lsm(&self) -> super::vals::Lsm {
440 let val = (self.0 >> 16usize) & 0x01;
441 super::vals::Lsm::from_bits(val as u8)
442 }
443 #[doc = "Link step mode. First the (possible 1D/repeated) block transfer is executed as defined by the current internal register file until CH\\[x\\].BR1.BNDT\\[15:0\\]
444= 0 and CH\\[x\\].BR1.BRC\\[10:0\\]
445= 0 if present. Secondly the next linked-list data structure is conditionally uploaded from memory as defined by CH\\[x\\].LLR. Then channel execution is completed. Note: This bit must be written when EN=0. This bit is read-only when EN=1."]
446 #[inline(always)]
447 pub fn set_lsm(&mut self, val: super::vals::Lsm) {
448 self.0 = (self.0 & !(0x01 << 16usize)) | (((val.to_bits() as u32) & 0x01) << 16usize);
449 }
450 #[doc = "priority level of the channel x LPDMA transfer versus others. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1."]
451 #[inline(always)]
452 pub const fn prio(&self) -> super::vals::Prio {
453 let val = (self.0 >> 22usize) & 0x03;
454 super::vals::Prio::from_bits(val as u8)
455 }
456 #[doc = "priority level of the channel x LPDMA transfer versus others. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1."]
457 #[inline(always)]
458 pub fn set_prio(&mut self, val: super::vals::Prio) {
459 self.0 = (self.0 & !(0x03 << 22usize)) | (((val.to_bits() as u32) & 0x03) << 22usize);
460 }
461 }
462 impl Default for ChCr {
463 #[inline(always)]
464 fn default() -> ChCr {
465 ChCr(0)
466 }
467 }
468 impl core::fmt::Debug for ChCr {
469 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
470 f.debug_struct("ChCr")
471 .field("en", &self.en())
472 .field("reset", &self.reset())
473 .field("susp", &self.susp())
474 .field("tcie", &self.tcie())
475 .field("htie", &self.htie())
476 .field("dteie", &self.dteie())
477 .field("uleie", &self.uleie())
478 .field("useie", &self.useie())
479 .field("suspie", &self.suspie())
480 .field("toie", &self.toie())
481 .field("lsm", &self.lsm())
482 .field("prio", &self.prio())
483 .finish()
484 }
485 }
486 #[cfg(feature = "defmt")]
487 impl defmt::Format for ChCr {
488 fn format(&self, f: defmt::Formatter) {
489 #[derive(defmt :: Format)]
490 struct ChCr {
491 en: bool,
492 reset: bool,
493 susp: bool,
494 tcie: bool,
495 htie: bool,
496 dteie: bool,
497 uleie: bool,
498 useie: bool,
499 suspie: bool,
500 toie: bool,
501 lsm: super::vals::Lsm,
502 prio: super::vals::Prio,
503 }
504 let proxy = ChCr {
505 en: self.en(),
506 reset: self.reset(),
507 susp: self.susp(),
508 tcie: self.tcie(),
509 htie: self.htie(),
510 dteie: self.dteie(),
511 uleie: self.uleie(),
512 useie: self.useie(),
513 suspie: self.suspie(),
514 toie: self.toie(),
515 lsm: self.lsm(),
516 prio: self.prio(),
517 };
518 defmt::write!(f, "{}", proxy)
519 }
520 }
521 #[doc = "LPDMA channel 7 flag clear register"]
522 #[repr(transparent)]
523 #[derive(Copy, Clone, Eq, PartialEq)]
524 pub struct ChFcr(pub u32);
525 impl ChFcr {
526 #[doc = "transfer complete flag clear"]
527 #[inline(always)]
528 pub const fn tcf(&self) -> bool {
529 let val = (self.0 >> 8usize) & 0x01;
530 val != 0
531 }
532 #[doc = "transfer complete flag clear"]
533 #[inline(always)]
534 pub fn set_tcf(&mut self, val: bool) {
535 self.0 = (self.0 & !(0x01 << 8usize)) | (((val as u32) & 0x01) << 8usize);
536 }
537 #[doc = "half transfer flag clear"]
538 #[inline(always)]
539 pub const fn htf(&self) -> bool {
540 let val = (self.0 >> 9usize) & 0x01;
541 val != 0
542 }
543 #[doc = "half transfer flag clear"]
544 #[inline(always)]
545 pub fn set_htf(&mut self, val: bool) {
546 self.0 = (self.0 & !(0x01 << 9usize)) | (((val as u32) & 0x01) << 9usize);
547 }
548 #[doc = "data transfer error flag clear"]
549 #[inline(always)]
550 pub const fn dtef(&self) -> bool {
551 let val = (self.0 >> 10usize) & 0x01;
552 val != 0
553 }
554 #[doc = "data transfer error flag clear"]
555 #[inline(always)]
556 pub fn set_dtef(&mut self, val: bool) {
557 self.0 = (self.0 & !(0x01 << 10usize)) | (((val as u32) & 0x01) << 10usize);
558 }
559 #[doc = "update link transfer error flag clear"]
560 #[inline(always)]
561 pub const fn ulef(&self) -> bool {
562 let val = (self.0 >> 11usize) & 0x01;
563 val != 0
564 }
565 #[doc = "update link transfer error flag clear"]
566 #[inline(always)]
567 pub fn set_ulef(&mut self, val: bool) {
568 self.0 = (self.0 & !(0x01 << 11usize)) | (((val as u32) & 0x01) << 11usize);
569 }
570 #[doc = "user setting error flag clear"]
571 #[inline(always)]
572 pub const fn usef(&self) -> bool {
573 let val = (self.0 >> 12usize) & 0x01;
574 val != 0
575 }
576 #[doc = "user setting error flag clear"]
577 #[inline(always)]
578 pub fn set_usef(&mut self, val: bool) {
579 self.0 = (self.0 & !(0x01 << 12usize)) | (((val as u32) & 0x01) << 12usize);
580 }
581 #[doc = "completed suspension flag clear"]
582 #[inline(always)]
583 pub const fn suspf(&self) -> bool {
584 let val = (self.0 >> 13usize) & 0x01;
585 val != 0
586 }
587 #[doc = "completed suspension flag clear"]
588 #[inline(always)]
589 pub fn set_suspf(&mut self, val: bool) {
590 self.0 = (self.0 & !(0x01 << 13usize)) | (((val as u32) & 0x01) << 13usize);
591 }
592 #[doc = "trigger overrun flag clear"]
593 #[inline(always)]
594 pub const fn tof(&self) -> bool {
595 let val = (self.0 >> 14usize) & 0x01;
596 val != 0
597 }
598 #[doc = "trigger overrun flag clear"]
599 #[inline(always)]
600 pub fn set_tof(&mut self, val: bool) {
601 self.0 = (self.0 & !(0x01 << 14usize)) | (((val as u32) & 0x01) << 14usize);
602 }
603 }
604 impl Default for ChFcr {
605 #[inline(always)]
606 fn default() -> ChFcr {
607 ChFcr(0)
608 }
609 }
610 impl core::fmt::Debug for ChFcr {
611 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
612 f.debug_struct("ChFcr")
613 .field("tcf", &self.tcf())
614 .field("htf", &self.htf())
615 .field("dtef", &self.dtef())
616 .field("ulef", &self.ulef())
617 .field("usef", &self.usef())
618 .field("suspf", &self.suspf())
619 .field("tof", &self.tof())
620 .finish()
621 }
622 }
623 #[cfg(feature = "defmt")]
624 impl defmt::Format for ChFcr {
625 fn format(&self, f: defmt::Formatter) {
626 #[derive(defmt :: Format)]
627 struct ChFcr {
628 tcf: bool,
629 htf: bool,
630 dtef: bool,
631 ulef: bool,
632 usef: bool,
633 suspf: bool,
634 tof: bool,
635 }
636 let proxy = ChFcr {
637 tcf: self.tcf(),
638 htf: self.htf(),
639 dtef: self.dtef(),
640 ulef: self.ulef(),
641 usef: self.usef(),
642 suspf: self.suspf(),
643 tof: self.tof(),
644 };
645 defmt::write!(f, "{}", proxy)
646 }
647 }
// NOTE(review): the generated doc text said "channel 14" here while the
// neighbouring registers in this file say "channel 15"; the register layout
// is identical for every channel, so the docs below use the generic "x".
#[doc = "LPDMA channel x linked-list base address register"]
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChLbar(pub u32);
impl ChLbar {
    #[doc = "linked-list base address of LPDMA channel x (upper 16 bits of the linked-list memory region)"]
    #[inline(always)]
    pub const fn lba(&self) -> u16 {
        // LBA occupies bits [31:16].
        let val = (self.0 >> 16usize) & 0xffff;
        val as u16
    }
    #[doc = "linked-list base address of LPDMA channel x (upper 16 bits of the linked-list memory region)"]
    #[inline(always)]
    pub fn set_lba(&mut self, val: u16) {
        self.0 = (self.0 & !(0xffff << 16usize)) | (((val as u32) & 0xffff) << 16usize);
    }
}
impl Default for ChLbar {
    #[inline(always)]
    fn default() -> ChLbar {
        // Reset value: zero.
        ChLbar(0)
    }
}
impl core::fmt::Debug for ChLbar {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        f.debug_struct("ChLbar").field("lba", &self.lba()).finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChLbar {
    fn format(&self, f: defmt::Formatter) {
        // Proxy struct lets defmt derive the field-by-field formatting.
        #[derive(defmt :: Format)]
        struct ChLbar {
            lba: u16,
        }
        let proxy = ChLbar { lba: self.lba() };
        defmt::write!(f, "{}", proxy)
    }
}
#[doc = "LPDMA channel 15 alternate linked-list address register"]
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChLlr(pub u32);
impl ChLlr {
    #[doc = "pointer (16-bit low-significant address) to the next linked-list data structure. The pointer must be 32-bit aligned; the two low-significant bits are write ignored. With all update bits (UT1/UT2/UB1/USA/UDA/ULL) cleared and LA = 0, the current LLI is the last one of the list."]
    #[inline(always)]
    pub const fn la(&self) -> u16 {
        // 14-bit field in bits [15:2]; bits [1:0] are implicitly zero.
        ((self.0 >> 2) & 0x3fff) as u16
    }
    #[doc = "pointer (16-bit low-significant address) to the next linked-list data structure. The pointer must be 32-bit aligned; the two low-significant bits are write ignored. With all update bits (UT1/UT2/UB1/USA/UDA/ULL) cleared and LA = 0, the current LLI is the last one of the list."]
    #[inline(always)]
    pub fn set_la(&mut self, val: u16) {
        self.0 = self.0 & !(0x3fff << 2) | ((val as u32 & 0x3fff) << 2);
    }
    #[doc = "update the CHx.LLR register from memory during the link transfer"]
    #[inline(always)]
    pub const fn ull(&self) -> bool {
        self.0 & (1 << 16) != 0
    }
    #[doc = "update the CHx.LLR register from memory during the link transfer"]
    #[inline(always)]
    pub fn set_ull(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 16) | ((val as u32) << 16);
    }
    #[doc = "update CHx.BR2 from memory during the link transfer"]
    #[inline(always)]
    pub const fn ub2(&self) -> bool {
        self.0 & (1 << 25) != 0
    }
    #[doc = "update CHx.BR2 from memory during the link transfer"]
    #[inline(always)]
    pub fn set_ub2(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 25) | ((val as u32) << 25);
    }
    #[doc = "update CHx.TR3 from memory during the link transfer"]
    #[inline(always)]
    pub const fn ut3(&self) -> bool {
        self.0 & (1 << 26) != 0
    }
    #[doc = "update CHx.TR3 from memory during the link transfer"]
    #[inline(always)]
    pub fn set_ut3(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 26) | ((val as u32) << 26);
    }
    #[doc = "update the CHx.DAR register from memory during the link transfer"]
    #[inline(always)]
    pub const fn uda(&self) -> bool {
        self.0 & (1 << 27) != 0
    }
    #[doc = "update the CHx.DAR register from memory during the link transfer"]
    #[inline(always)]
    pub fn set_uda(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 27) | ((val as u32) << 27);
    }
    #[doc = "update CHx.SAR from memory during the link transfer"]
    #[inline(always)]
    pub const fn usa(&self) -> bool {
        self.0 & (1 << 28) != 0
    }
    #[doc = "update CHx.SAR from memory during the link transfer"]
    #[inline(always)]
    pub fn set_usa(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 28) | ((val as u32) << 28);
    }
    #[doc = "update CHx.BR1 from memory during the link transfer. If UB1 = 0 while CHx.LLR is non-zero, the block count is restored to its programmed value after each data transfer completes."]
    #[inline(always)]
    pub const fn ub1(&self) -> bool {
        self.0 & (1 << 29) != 0
    }
    #[doc = "update CHx.BR1 from memory during the link transfer. If UB1 = 0 while CHx.LLR is non-zero, the block count is restored to its programmed value after each data transfer completes."]
    #[inline(always)]
    pub fn set_ub1(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 29) | ((val as u32) << 29);
    }
    #[doc = "update CHx.TR2 from memory during the link transfer"]
    #[inline(always)]
    pub const fn ut2(&self) -> bool {
        self.0 & (1 << 30) != 0
    }
    #[doc = "update CHx.TR2 from memory during the link transfer"]
    #[inline(always)]
    pub fn set_ut2(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 30) | ((val as u32) << 30);
    }
    #[doc = "update CHx.TR1 from memory during the link transfer"]
    #[inline(always)]
    pub const fn ut1(&self) -> bool {
        self.0 & (1 << 31) != 0
    }
    #[doc = "update CHx.TR1 from memory during the link transfer"]
    #[inline(always)]
    pub fn set_ut1(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 31) | ((val as u32) << 31);
    }
}
impl Default for ChLlr {
    #[inline(always)]
    fn default() -> ChLlr {
        // Reset value: no pointer, no update flags.
        Self(0)
    }
}
impl core::fmt::Debug for ChLlr {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Statement-style debug builder; output matches the chained form.
        let mut s = f.debug_struct("ChLlr");
        s.field("la", &self.la());
        s.field("ull", &self.ull());
        s.field("ub2", &self.ub2());
        s.field("ut3", &self.ut3());
        s.field("uda", &self.uda());
        s.field("usa", &self.usa());
        s.field("ub1", &self.ub1());
        s.field("ut2", &self.ut2());
        s.field("ut1", &self.ut1());
        s.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChLlr {
    fn format(&self, f: defmt::Formatter) {
        // Proxy struct lets defmt derive the field-by-field formatting.
        #[derive(defmt :: Format)]
        struct ChLlr {
            la: u16,
            ull: bool,
            ub2: bool,
            ut3: bool,
            uda: bool,
            usa: bool,
            ub1: bool,
            ut2: bool,
            ut1: bool,
        }
        let proxy = ChLlr {
            la: self.la(),
            ull: self.ull(),
            ub2: self.ub2(),
            ut3: self.ut3(),
            uda: self.uda(),
            usa: self.usa(),
            ub1: self.ub1(),
            ut2: self.ut2(),
            ut1: self.ut1(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
#[doc = "LPDMA channel 15 status register"]
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChSr(pub u32);
impl ChSr {
    #[doc = "idle flag. Set after hard reset or when the channel returns to idle (suspended or disabled); de-asserted by hardware when the channel is enabled with a valid configuration."]
    #[inline(always)]
    pub const fn idlef(&self) -> bool {
        self.0 & 1 != 0
    }
    #[doc = "idle flag. Set after hard reset or when the channel returns to idle (suspended or disabled); de-asserted by hardware when the channel is enabled with a valid configuration."]
    #[inline(always)]
    pub fn set_idlef(&mut self, val: bool) {
        self.0 = self.0 & !1 | (val as u32);
    }
    #[doc = "transfer complete flag. Raised on block / 2D-repeated-block / LLI / full linked-list completion, depending on the transfer complete event mode (CHx.TR2.TCEM)."]
    #[inline(always)]
    pub const fn tcf(&self) -> bool {
        self.0 & (1 << 8) != 0
    }
    #[doc = "transfer complete flag. Raised on block / 2D-repeated-block / LLI / full linked-list completion, depending on the transfer complete event mode (CHx.TR2.TCEM)."]
    #[inline(always)]
    pub fn set_tcf(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 8) | ((val as u32) << 8);
    }
    #[doc = "half transfer flag. Raised when half of the block bytes (or half of the repeated blocks) have been transferred, depending on CHx.TR2.TCEM."]
    #[inline(always)]
    pub const fn htf(&self) -> bool {
        self.0 & (1 << 9) != 0
    }
    #[doc = "half transfer flag. Raised when half of the block bytes (or half of the repeated blocks) have been transferred, depending on CHx.TR2.TCEM."]
    #[inline(always)]
    pub fn set_htf(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 9) | ((val as u32) << 9);
    }
    #[doc = "data transfer error flag"]
    #[inline(always)]
    pub const fn dtef(&self) -> bool {
        self.0 & (1 << 10) != 0
    }
    #[doc = "data transfer error flag"]
    #[inline(always)]
    pub fn set_dtef(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 10) | ((val as u32) << 10);
    }
    #[doc = "update link transfer error flag"]
    #[inline(always)]
    pub const fn ulef(&self) -> bool {
        self.0 & (1 << 11) != 0
    }
    #[doc = "update link transfer error flag"]
    #[inline(always)]
    pub fn set_ulef(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 11) | ((val as u32) << 11);
    }
    #[doc = "user setting error flag"]
    #[inline(always)]
    pub const fn usef(&self) -> bool {
        self.0 & (1 << 12) != 0
    }
    #[doc = "user setting error flag"]
    #[inline(always)]
    pub fn set_usef(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 12) | ((val as u32) << 12);
    }
    #[doc = "completed suspension flag"]
    #[inline(always)]
    pub const fn suspf(&self) -> bool {
        self.0 & (1 << 13) != 0
    }
    #[doc = "completed suspension flag"]
    #[inline(always)]
    pub fn set_suspf(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 13) | ((val as u32) << 13);
    }
    #[doc = "trigger overrun flag"]
    #[inline(always)]
    pub const fn tof(&self) -> bool {
        self.0 & (1 << 14) != 0
    }
    #[doc = "trigger overrun flag"]
    #[inline(always)]
    pub fn set_tof(&mut self, val: bool) {
        self.0 = self.0 & !(1 << 14) | ((val as u32) << 14);
    }
}
impl Default for ChSr {
    #[inline(always)]
    fn default() -> ChSr {
        // Reset value: zero (note: hardware sets IDLEF after reset; this is
        // only the zero value of the in-memory representation).
        Self(0)
    }
}
impl core::fmt::Debug for ChSr {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        // Statement-style debug builder; output matches the chained form.
        let mut s = f.debug_struct("ChSr");
        s.field("idlef", &self.idlef());
        s.field("tcf", &self.tcf());
        s.field("htf", &self.htf());
        s.field("dtef", &self.dtef());
        s.field("ulef", &self.ulef());
        s.field("usef", &self.usef());
        s.field("suspf", &self.suspf());
        s.field("tof", &self.tof());
        s.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChSr {
    fn format(&self, f: defmt::Formatter) {
        // Proxy struct lets defmt derive the field-by-field formatting.
        #[derive(defmt :: Format)]
        struct ChSr {
            idlef: bool,
            tcf: bool,
            htf: bool,
            dtef: bool,
            ulef: bool,
            usef: bool,
            suspf: bool,
            tof: bool,
        }
        let proxy = ChSr {
            idlef: self.idlef(),
            tcf: self.tcf(),
            htf: self.htf(),
            dtef: self.dtef(),
            ulef: self.ulef(),
            usef: self.usef(),
            suspf: self.suspf(),
            tof: self.tof(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
987 #[doc = "LPDMA channel 8 transfer register 1"]
988 #[repr(transparent)]
989 #[derive(Copy, Clone, Eq, PartialEq)]
990 pub struct ChTr1(pub u32);
991 impl ChTr1 {
992 #[doc = "binary logarithm of the source data width of a burst in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and no transfer is issued. A source block size must be a multiple of the source data width (CH\\[x\\].BR1.BNDT\\[2:0\\]
993versus SDW_LOG2\\[1:0\\]). Otherwise, a user setting error is reported and no transfer is issued. A source single transfer must have an aligned address with its data width (start address CH\\[x\\].SAR\\[2:0\\]
994versus SDW_LOG2\\[1:0\\]). Otherwise, a user setting error is reported and none transfer is issued."]
995 #[inline(always)]
996 pub const fn sdw(&self) -> super::vals::Dw {
997 let val = (self.0 >> 0usize) & 0x03;
998 super::vals::Dw::from_bits(val as u8)
999 }
1000 #[doc = "binary logarithm of the source data width of a burst in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and no transfer is issued. A source block size must be a multiple of the source data width (CH\\[x\\].BR1.BNDT\\[2:0\\]
1001versus SDW_LOG2\\[1:0\\]). Otherwise, a user setting error is reported and no transfer is issued. A source single transfer must have an aligned address with its data width (start address CH\\[x\\].SAR\\[2:0\\]
1002versus SDW_LOG2\\[1:0\\]). Otherwise, a user setting error is reported and none transfer is issued."]
1003 #[inline(always)]
1004 pub fn set_sdw(&mut self, val: super::vals::Dw) {
1005 self.0 = (self.0 & !(0x03 << 0usize)) | (((val.to_bits() as u32) & 0x03) << 0usize);
1006 }
1007 #[doc = "source incrementing burst. The source address, pointed by CH\\[x\\].SAR, is kept constant after a burst beat/single transfer or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer."]
1008 #[inline(always)]
1009 pub const fn sinc(&self) -> bool {
1010 let val = (self.0 >> 3usize) & 0x01;
1011 val != 0
1012 }
1013 #[doc = "source incrementing burst. The source address, pointed by CH\\[x\\].SAR, is kept constant after a burst beat/single transfer or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer."]
1014 #[inline(always)]
1015 pub fn set_sinc(&mut self, val: bool) {
1016 self.0 = (self.0 & !(0x01 << 3usize)) | (((val as u32) & 0x01) << 3usize);
1017 }
1018 #[doc = "padding/alignment mode. If DDW\\[1:0\\]
1019= SDW_LOG2\\[1:0\\]: if the data width of a burst destination transfer is equal to the data width of a burst source transfer, these bits are ignored. Else: - Case 1: If destination data width > source data width. 1x: successive source data are FIFO queued and packed at the destination data width, in a left (LSB) to right (MSB) order (named little endian), before a destination transfer. - Case 2: If destination data width < source data width. 1x: source data is FIFO queued and unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination. Note:"]
1020 #[inline(always)]
1021 pub const fn pam(&self) -> super::vals::Pam {
1022 let val = (self.0 >> 11usize) & 0x03;
1023 super::vals::Pam::from_bits(val as u8)
1024 }
1025 #[doc = "padding/alignment mode. If DDW\\[1:0\\]
1026= SDW_LOG2\\[1:0\\]: if the data width of a burst destination transfer is equal to the data width of a burst source transfer, these bits are ignored. Else: - Case 1: If destination data width > source data width. 1x: successive source data are FIFO queued and packed at the destination data width, in a left (LSB) to right (MSB) order (named little endian), before a destination transfer. - Case 2: If destination data width < source data width. 1x: source data is FIFO queued and unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination. Note:"]
1027 #[inline(always)]
1028 pub fn set_pam(&mut self, val: super::vals::Pam) {
1029 self.0 = (self.0 & !(0x03 << 11usize)) | (((val.to_bits() as u32) & 0x03) << 11usize);
1030 }
1031 #[doc = "security attribute of the LPDMA transfer from the source. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx =1 . A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this SSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the LPDMA transfer from the source is non-secure."]
1032 #[inline(always)]
1033 pub const fn ssec(&self) -> bool {
1034 let val = (self.0 >> 15usize) & 0x01;
1035 val != 0
1036 }
1037 #[doc = "security attribute of the LPDMA transfer from the source. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx =1 . A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this SSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the LPDMA transfer from the source is non-secure."]
1038 #[inline(always)]
1039 pub fn set_ssec(&mut self, val: bool) {
1040 self.0 = (self.0 & !(0x01 << 15usize)) | (((val as u32) & 0x01) << 15usize);
1041 }
1042 #[doc = "binary logarithm of the destination data width of a burst, in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and none transfer is issued. A destination burst transfer must have an aligned address with its data width (start address CH\\[x\\].DAR\\[2:0\\]
1043and address offset CH\\[x\\].TR3.DAO\\[2:0\\], versus DDW\\[1:0\\]). Otherwise a user setting error is reported and no transfer is issued."]
1044 #[inline(always)]
1045 pub const fn ddw(&self) -> super::vals::Dw {
1046 let val = (self.0 >> 16usize) & 0x03;
1047 super::vals::Dw::from_bits(val as u8)
1048 }
1049 #[doc = "binary logarithm of the destination data width of a burst, in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and none transfer is issued. A destination burst transfer must have an aligned address with its data width (start address CH\\[x\\].DAR\\[2:0\\]
1050and address offset CH\\[x\\].TR3.DAO\\[2:0\\], versus DDW\\[1:0\\]). Otherwise a user setting error is reported and no transfer is issued."]
1051 #[inline(always)]
1052 pub fn set_ddw(&mut self, val: super::vals::Dw) {
1053 self.0 = (self.0 & !(0x03 << 16usize)) | (((val.to_bits() as u32) & 0x03) << 16usize);
1054 }
1055 #[doc = "destination incrementing burst. The destination address, pointed by CH\\[x\\].DAR, is kept constant after a burst beat/single transfer, or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer."]
1056 #[inline(always)]
1057 pub const fn dinc(&self) -> bool {
1058 let val = (self.0 >> 19usize) & 0x01;
1059 val != 0
1060 }
1061 #[doc = "destination incrementing burst. The destination address, pointed by CH\\[x\\].DAR, is kept constant after a burst beat/single transfer, or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer."]
1062 #[inline(always)]
1063 pub fn set_dinc(&mut self, val: bool) {
1064 self.0 = (self.0 & !(0x01 << 19usize)) | (((val as u32) & 0x01) << 19usize);
1065 }
1066 #[doc = "security attribute of the LPDMA transfer to the destination. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx = 1. A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this DSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the LPDMA transfer to the destination is non-secure."]
1067 #[inline(always)]
1068 pub const fn dsec(&self) -> bool {
1069 let val = (self.0 >> 31usize) & 0x01;
1070 val != 0
1071 }
1072 #[doc = "security attribute of the LPDMA transfer to the destination. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx = 1. A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this DSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the LPDMA transfer to the destination is non-secure."]
1073 #[inline(always)]
1074 pub fn set_dsec(&mut self, val: bool) {
1075 self.0 = (self.0 & !(0x01 << 31usize)) | (((val as u32) & 0x01) << 31usize);
1076 }
1077 }
1078 impl Default for ChTr1 {
1079 #[inline(always)]
1080 fn default() -> ChTr1 {
1081 ChTr1(0)
1082 }
1083 }
1084 impl core::fmt::Debug for ChTr1 {
1085 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1086 f.debug_struct("ChTr1")
1087 .field("sdw", &self.sdw())
1088 .field("sinc", &self.sinc())
1089 .field("pam", &self.pam())
1090 .field("ssec", &self.ssec())
1091 .field("ddw", &self.ddw())
1092 .field("dinc", &self.dinc())
1093 .field("dsec", &self.dsec())
1094 .finish()
1095 }
1096 }
1097 #[cfg(feature = "defmt")]
1098 impl defmt::Format for ChTr1 {
1099 fn format(&self, f: defmt::Formatter) {
1100 #[derive(defmt :: Format)]
1101 struct ChTr1 {
1102 sdw: super::vals::Dw,
1103 sinc: bool,
1104 pam: super::vals::Pam,
1105 ssec: bool,
1106 ddw: super::vals::Dw,
1107 dinc: bool,
1108 dsec: bool,
1109 }
1110 let proxy = ChTr1 {
1111 sdw: self.sdw(),
1112 sinc: self.sinc(),
1113 pam: self.pam(),
1114 ssec: self.ssec(),
1115 ddw: self.ddw(),
1116 dinc: self.dinc(),
1117 dsec: self.dsec(),
1118 };
1119 defmt::write!(f, "{}", proxy)
1120 }
1121 }
// NOTE(review): the generated doc said "channel 10" although sibling register
// docs in this file say "channel 15"; the layout is per-channel-generic, so
// the generic "x" is used here. Field accessors follow in `impl ChTr2`.
#[doc = "LPDMA channel x transfer register 2"]
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChTr2(pub u32);
1126 impl ChTr2 {
1127 #[doc = "LPDMA hardware request selection. These bits are ignored if channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else, the selected hardware request is internally taken into account as per . The user must not assign a same input hardware request (same REQSEL\\[6:0\\]
1128value) to different active LPDMA channels (CH\\[x\\].CR.EN = 1 and CH\\[x\\].TR2.SWREQ = 0 for these channels). LPDMA is not intended to hardware support the case of simultaneous enabled channels incorrectly configured with a same hardware peripheral request signal, and there is no user setting error reporting."]
1129 #[inline(always)]
1130 pub const fn reqsel(&self) -> u8 {
1131 let val = (self.0 >> 0usize) & 0x7f;
1132 val as u8
1133 }
1134 #[doc = "LPDMA hardware request selection. These bits are ignored if channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else, the selected hardware request is internally taken into account as per . The user must not assign a same input hardware request (same REQSEL\\[6:0\\]
1135value) to different active LPDMA channels (CH\\[x\\].CR.EN = 1 and CH\\[x\\].TR2.SWREQ = 0 for these channels). LPDMA is not intended to hardware support the case of simultaneous enabled channels incorrectly configured with a same hardware peripheral request signal, and there is no user setting error reporting."]
1136 #[inline(always)]
1137 pub fn set_reqsel(&mut self, val: u8) {
1138 self.0 = (self.0 & !(0x7f << 0usize)) | (((val as u32) & 0x7f) << 0usize);
1139 }
1140 #[doc = "software request. This bit is internally taken into account when CH\\[x\\].CR.EN is asserted."]
1141 #[inline(always)]
1142 pub const fn swreq(&self) -> super::vals::Swreq {
1143 let val = (self.0 >> 9usize) & 0x01;
1144 super::vals::Swreq::from_bits(val as u8)
1145 }
1146 #[doc = "software request. This bit is internally taken into account when CH\\[x\\].CR.EN is asserted."]
1147 #[inline(always)]
1148 pub fn set_swreq(&mut self, val: super::vals::Swreq) {
1149 self.0 = (self.0 & !(0x01 << 9usize)) | (((val.to_bits() as u32) & 0x01) << 9usize);
1150 }
1151 #[doc = "destination hardware request. This bit is ignored if channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else: Note:"]
1152 #[inline(always)]
1153 pub const fn dreq(&self) -> super::vals::Dreq {
1154 let val = (self.0 >> 10usize) & 0x01;
1155 super::vals::Dreq::from_bits(val as u8)
1156 }
1157 #[doc = "destination hardware request. This bit is ignored if channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else: Note:"]
1158 #[inline(always)]
1159 pub fn set_dreq(&mut self, val: super::vals::Dreq) {
1160 self.0 = (self.0 & !(0x01 << 10usize)) | (((val.to_bits() as u32) & 0x01) << 10usize);
1161 }
1162 #[doc = "Block hardware request. If the channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer), this bit is ignored. Else:"]
1163 #[inline(always)]
1164 pub const fn breq(&self) -> super::vals::Breq {
1165 let val = (self.0 >> 11usize) & 0x01;
1166 super::vals::Breq::from_bits(val as u8)
1167 }
1168 #[doc = "Block hardware request. If the channel x is activated (CH\\[x\\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer), this bit is ignored. Else:"]
1169 #[inline(always)]
1170 pub fn set_breq(&mut self, val: super::vals::Breq) {
1171 self.0 = (self.0 & !(0x01 << 11usize)) | (((val.to_bits() as u32) & 0x01) << 11usize);
1172 }
1173 #[doc = "trigger mode. These bits define the transfer granularity for its conditioning by the trigger. If the channel x is enabled (CH\\[x\\].CR.EN asserted) with TRIGPOL\\[1:0\\]
1174= 00 or 11, these TRIGM\\[1:0\\]
1175bits are ignored. Else, a LPDMA transfer is conditioned by at least one trigger hit: first burst read of a 2D/repeated block transfer is conditioned by one hit trigger. – If the peripheral is programmed as a source (DREQ = 0) of the LLI data transfer, each programmed burst read is conditioned. – If the peripheral is programmed as a destination (DREQ = 1) of the LLI data transfer, each programmed burst write is conditioned. The first memory burst read of a (possibly 2D/repeated) block, also named as the first ready FIFO-based source burst, is gated by the occurrence of both the hardware request and the first trigger hit. The LPDMA monitoring of a trigger for channel x is started when the channel is enabled/loaded with a new active trigger configuration: rising or falling edge on a selected trigger (TRIGPOL\\[1:0\\]
1176= 01 or respectively TRIGPOL\\[1:0\\]
1177= 10). The monitoring of this trigger is kept active during the triggered and uncompleted (data or link) transfer; and if a new trigger is detected then, this hit is internally memorized to grant the next transfer, as long as the defined rising or falling edge is not modified, and the TRIGSEL\\[5:0\\]
1178is not modified, and the channel is enabled. Transferring a next LLIn+1 that updates the CH\\[x\\].TR2 with a new value for any of TRIGSEL\\[5:0\\]
1179or TRIGPOL\\[1:0\\], resets the monitoring, trashing the memorized hit of the formerly defined LLIn trigger. After a first new trigger hitn+1 is memorized, if another second trigger hitn+2 is detected and if the hitn triggered transfer is still not completed, hitn+2 is lost and not memorized.memorized. A trigger overrun flag is reported (CH\\[x\\].SR.TOF =1 ), and an interrupt is generated if enabled (CH\\[x\\].CR.TOIE = 1). The channel is not automatically disabled by hardware due to a trigger overrun. Note: When the source block size is not a multiple of the source burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, if TRIGM\\[1:0\\]
1180= 11 and (SWREQ =1 or (SWREQ = 0 and DREQ =0 )), the shortened burst transfer (by singles or/and by bursts of lower length) is conditioned once by the trigger. When the programmed destination burst is internally shortened by singles or/and by bursts of lower length (versus FIFO size, versus block size, 1-Kbyte boundary address crossing): if the trigger is conditioning the programmed destination burst (if TRIGM\\[1:0\\]
1181= 11 and SWREQ = 0 and DREQ = 1), this shortened destination burst transfer is conditioned once by the trigger."]
1182 #[inline(always)]
1183 pub const fn trigm(&self) -> super::vals::Trigm {
1184 let val = (self.0 >> 14usize) & 0x03;
1185 super::vals::Trigm::from_bits(val as u8)
1186 }
1187 #[doc = "trigger mode. These bits define the transfer granularity for its conditioning by the trigger. If the channel x is enabled (CH\\[x\\].CR.EN asserted) with TRIGPOL\\[1:0\\]
1188= 00 or 11, these TRIGM\\[1:0\\]
1189bits are ignored. Else, a LPDMA transfer is conditioned by at least one trigger hit: first burst read of a 2D/repeated block transfer is conditioned by one hit trigger. – If the peripheral is programmed as a source (DREQ = 0) of the LLI data transfer, each programmed burst read is conditioned. – If the peripheral is programmed as a destination (DREQ = 1) of the LLI data transfer, each programmed burst write is conditioned. The first memory burst read of a (possibly 2D/repeated) block, also named as the first ready FIFO-based source burst, is gated by the occurrence of both the hardware request and the first trigger hit. The LPDMA monitoring of a trigger for channel x is started when the channel is enabled/loaded with a new active trigger configuration: rising or falling edge on a selected trigger (TRIGPOL\\[1:0\\]
1190= 01 or respectively TRIGPOL\\[1:0\\]
1191= 10). The monitoring of this trigger is kept active during the triggered and uncompleted (data or link) transfer; and if a new trigger is detected then, this hit is internally memorized to grant the next transfer, as long as the defined rising or falling edge is not modified, and the TRIGSEL\\[5:0\\]
1192is not modified, and the channel is enabled. Transferring a next LLIn+1 that updates the CH\\[x\\].TR2 with a new value for any of TRIGSEL\\[5:0\\]
1193or TRIGPOL\\[1:0\\], resets the monitoring, trashing the memorized hit of the formerly defined LLIn trigger. After a first new trigger hitn+1 is memorized, if another second trigger hitn+2 is detected and if the hitn triggered transfer is still not completed, hitn+2 is lost and not memorized.memorized. A trigger overrun flag is reported (CH\\[x\\].SR.TOF =1 ), and an interrupt is generated if enabled (CH\\[x\\].CR.TOIE = 1). The channel is not automatically disabled by hardware due to a trigger overrun. Note: When the source block size is not a multiple of the source burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, if TRIGM\\[1:0\\]
1194= 11 and (SWREQ =1 or (SWREQ = 0 and DREQ =0 )), the shortened burst transfer (by singles or/and by bursts of lower length) is conditioned once by the trigger. When the programmed destination burst is internally shortened by singles or/and by bursts of lower length (versus FIFO size, versus block size, 1-Kbyte boundary address crossing): if the trigger is conditioning the programmed destination burst (if TRIGM\\[1:0\\]
1195= 11 and SWREQ = 0 and DREQ = 1), this shortened destination burst transfer is conditioned once by the trigger."]
1196 #[inline(always)]
1197 pub fn set_trigm(&mut self, val: super::vals::Trigm) {
1198 self.0 = (self.0 & !(0x03 << 14usize)) | (((val.to_bits() as u32) & 0x03) << 14usize);
1199 }
1200 #[doc = "trigger event input selection. These bits select the trigger event input of the LPDMA transfer (as per ), with an active trigger event if TRIGPOL\\[1:0\\]
1201≠ 00."]
1202 #[inline(always)]
1203 pub const fn trigsel(&self) -> u8 {
1204 let val = (self.0 >> 16usize) & 0x3f;
1205 val as u8
1206 }
1207 #[doc = "trigger event input selection. These bits select the trigger event input of the LPDMA transfer (as per ), with an active trigger event if TRIGPOL\\[1:0\\]
1208≠ 00."]
1209 #[inline(always)]
1210 pub fn set_trigsel(&mut self, val: u8) {
1211 self.0 = (self.0 & !(0x3f << 16usize)) | (((val as u32) & 0x3f) << 16usize);
1212 }
1213 #[doc = "trigger event polarity. These bits define the polarity of the selected trigger event input defined by TRIGSEL\\[5:0\\]."]
1214 #[inline(always)]
1215 pub const fn trigpol(&self) -> super::vals::Trigpol {
1216 let val = (self.0 >> 24usize) & 0x03;
1217 super::vals::Trigpol::from_bits(val as u8)
1218 }
1219 #[doc = "trigger event polarity. These bits define the polarity of the selected trigger event input defined by TRIGSEL\\[5:0\\]."]
1220 #[inline(always)]
1221 pub fn set_trigpol(&mut self, val: super::vals::Trigpol) {
1222 self.0 = (self.0 & !(0x03 << 24usize)) | (((val.to_bits() as u32) & 0x03) << 24usize);
1223 }
1224 #[doc = "transfer complete event mode. These bits define the transfer granularity for the transfer complete and half transfer complete events generation. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1225= 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1226= 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (i.e. directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1227=0 ), then the half transfer event is not generated, and the transfer complete event is generated when is completed the loading of the LLI1."]
1228 #[inline(always)]
1229 pub const fn tcem(&self) -> super::vals::Tcem {
1230 let val = (self.0 >> 30usize) & 0x03;
1231 super::vals::Tcem::from_bits(val as u8)
1232 }
1233 #[doc = "transfer complete event mode. These bits define the transfer granularity for the transfer complete and half transfer complete events generation. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1234= 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1235= 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (i.e. directly programmed by the internal register file with CH\\[x\\].BR1.BNDT\\[15:0\\]
1236=0 ), then the half transfer event is not generated, and the transfer complete event is generated when is completed the loading of the LLI1."]
1237 #[inline(always)]
1238 pub fn set_tcem(&mut self, val: super::vals::Tcem) {
1239 self.0 = (self.0 & !(0x03 << 30usize)) | (((val.to_bits() as u32) & 0x03) << 30usize);
1240 }
1241 }
1242 impl Default for ChTr2 {
1243 #[inline(always)]
1244 fn default() -> ChTr2 {
1245 ChTr2(0)
1246 }
1247 }
1248 impl core::fmt::Debug for ChTr2 {
1249 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1250 f.debug_struct("ChTr2")
1251 .field("reqsel", &self.reqsel())
1252 .field("swreq", &self.swreq())
1253 .field("dreq", &self.dreq())
1254 .field("breq", &self.breq())
1255 .field("trigm", &self.trigm())
1256 .field("trigsel", &self.trigsel())
1257 .field("trigpol", &self.trigpol())
1258 .field("tcem", &self.tcem())
1259 .finish()
1260 }
1261 }
1262 #[cfg(feature = "defmt")]
1263 impl defmt::Format for ChTr2 {
1264 fn format(&self, f: defmt::Formatter) {
1265 #[derive(defmt :: Format)]
1266 struct ChTr2 {
1267 reqsel: u8,
1268 swreq: super::vals::Swreq,
1269 dreq: super::vals::Dreq,
1270 breq: super::vals::Breq,
1271 trigm: super::vals::Trigm,
1272 trigsel: u8,
1273 trigpol: super::vals::Trigpol,
1274 tcem: super::vals::Tcem,
1275 }
1276 let proxy = ChTr2 {
1277 reqsel: self.reqsel(),
1278 swreq: self.swreq(),
1279 dreq: self.dreq(),
1280 breq: self.breq(),
1281 trigm: self.trigm(),
1282 trigsel: self.trigsel(),
1283 trigpol: self.trigpol(),
1284 tcem: self.tcem(),
1285 };
1286 defmt::write!(f, "{}", proxy)
1287 }
1288 }
1289 #[doc = "LPDMA channel 14 transfer register 3"]
1290 #[repr(transparent)]
1291 #[derive(Copy, Clone, Eq, PartialEq)]
1292 pub struct ChTr3(pub u32);
1293 impl ChTr3 {
1294 #[doc = "source address offset increment. The source address, pointed by CH\\[x\\].SAR, is incremented or decremented (depending on CH\\[x\\].BR1.SDEC) by this offset SAO\\[12:0\\]
1295for each programmed source burst. This offset is not including and is added to the programmed burst size when the completed burst is addressed in incremented mode (CH\\[x\\].TR1.SINC = 1). Note: A source address offset must be aligned with the programmed data width of a source burst (SAO\\[2:0\\]
1296versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and none transfer is issued. When the source block size is not a multiple of the destination burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, the additional CH\\[x\\].TR3.SAO\\[12:0\\]
1297is not applied."]
1298 #[inline(always)]
1299 pub const fn sao(&self) -> u16 {
1300 let val = (self.0 >> 0usize) & 0x1fff;
1301 val as u16
1302 }
1303 #[doc = "source address offset increment. The source address, pointed by CH\\[x\\].SAR, is incremented or decremented (depending on CH\\[x\\].BR1.SDEC) by this offset SAO\\[12:0\\]
1304for each programmed source burst. This offset is not including and is added to the programmed burst size when the completed burst is addressed in incremented mode (CH\\[x\\].TR1.SINC = 1). Note: A source address offset must be aligned with the programmed data width of a source burst (SAO\\[2:0\\]
1305versus CH\\[x\\].TR1.SDW_LOG2\\[1:0\\]). Else a user setting error is reported and none transfer is issued. When the source block size is not a multiple of the destination burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, the additional CH\\[x\\].TR3.SAO\\[12:0\\]
1306is not applied."]
1307 #[inline(always)]
1308 pub fn set_sao(&mut self, val: u16) {
1309 self.0 = (self.0 & !(0x1fff << 0usize)) | (((val as u32) & 0x1fff) << 0usize);
1310 }
1311 #[doc = "destination address offset increment. The destination address, pointed by CH\\[x\\].DAR, is incremented or decremented (depending on CH\\[x\\].BR1.DDEC) by this offset DAO\\[12:0\\]
1312for each programmed destination burst. This offset is not including and is added to the programmed burst size when the completed burst is addressed in incremented mode (CH\\[x\\].TR1.DINC = 1). Note: A destination address offset must be aligned with the programmed data width of a destination burst (DAO\\[2:0\\]
1313versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else, a user setting error is reported and no transfer is issued."]
1314 #[inline(always)]
1315 pub const fn dao(&self) -> u16 {
1316 let val = (self.0 >> 16usize) & 0x1fff;
1317 val as u16
1318 }
1319 #[doc = "destination address offset increment. The destination address, pointed by CH\\[x\\].DAR, is incremented or decremented (depending on CH\\[x\\].BR1.DDEC) by this offset DAO\\[12:0\\]
1320for each programmed destination burst. This offset is not including and is added to the programmed burst size when the completed burst is addressed in incremented mode (CH\\[x\\].TR1.DINC = 1). Note: A destination address offset must be aligned with the programmed data width of a destination burst (DAO\\[2:0\\]
1321versus CH\\[x\\].TR1.DDW\\[1:0\\]). Else, a user setting error is reported and no transfer is issued."]
1322 #[inline(always)]
1323 pub fn set_dao(&mut self, val: u16) {
1324 self.0 = (self.0 & !(0x1fff << 16usize)) | (((val as u32) & 0x1fff) << 16usize);
1325 }
1326 }
1327 impl Default for ChTr3 {
1328 #[inline(always)]
1329 fn default() -> ChTr3 {
1330 ChTr3(0)
1331 }
1332 }
1333 impl core::fmt::Debug for ChTr3 {
1334 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1335 f.debug_struct("ChTr3")
1336 .field("sao", &self.sao())
1337 .field("dao", &self.dao())
1338 .finish()
1339 }
1340 }
1341 #[cfg(feature = "defmt")]
1342 impl defmt::Format for ChTr3 {
1343 fn format(&self, f: defmt::Formatter) {
1344 #[derive(defmt :: Format)]
1345 struct ChTr3 {
1346 sao: u16,
1347 dao: u16,
1348 }
1349 let proxy = ChTr3 {
1350 sao: self.sao(),
1351 dao: self.dao(),
1352 };
1353 defmt::write!(f, "{}", proxy)
1354 }
1355 }
1356 #[doc = "LPDMA secure masked interrupt status register"]
1357 #[repr(transparent)]
1358 #[derive(Copy, Clone, Eq, PartialEq)]
1359 pub struct Misr(pub u32);
1360 impl Misr {
1361 #[doc = "MIS0"]
1362 #[inline(always)]
1363 pub const fn mis(&self, n: usize) -> bool {
1364 assert!(n < 16usize);
1365 let offs = 0usize + n * 1usize;
1366 let val = (self.0 >> offs) & 0x01;
1367 val != 0
1368 }
1369 #[doc = "MIS0"]
1370 #[inline(always)]
1371 pub fn set_mis(&mut self, n: usize, val: bool) {
1372 assert!(n < 16usize);
1373 let offs = 0usize + n * 1usize;
1374 self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs);
1375 }
1376 }
1377 impl Default for Misr {
1378 #[inline(always)]
1379 fn default() -> Misr {
1380 Misr(0)
1381 }
1382 }
1383 impl core::fmt::Debug for Misr {
1384 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1385 f.debug_struct("Misr")
1386 .field(
1387 "mis",
1388 &[
1389 self.mis(0usize),
1390 self.mis(1usize),
1391 self.mis(2usize),
1392 self.mis(3usize),
1393 self.mis(4usize),
1394 self.mis(5usize),
1395 self.mis(6usize),
1396 self.mis(7usize),
1397 self.mis(8usize),
1398 self.mis(9usize),
1399 self.mis(10usize),
1400 self.mis(11usize),
1401 self.mis(12usize),
1402 self.mis(13usize),
1403 self.mis(14usize),
1404 self.mis(15usize),
1405 ],
1406 )
1407 .finish()
1408 }
1409 }
1410 #[cfg(feature = "defmt")]
1411 impl defmt::Format for Misr {
1412 fn format(&self, f: defmt::Formatter) {
1413 #[derive(defmt :: Format)]
1414 struct Misr {
1415 mis: [bool; 16usize],
1416 }
1417 let proxy = Misr {
1418 mis: [
1419 self.mis(0usize),
1420 self.mis(1usize),
1421 self.mis(2usize),
1422 self.mis(3usize),
1423 self.mis(4usize),
1424 self.mis(5usize),
1425 self.mis(6usize),
1426 self.mis(7usize),
1427 self.mis(8usize),
1428 self.mis(9usize),
1429 self.mis(10usize),
1430 self.mis(11usize),
1431 self.mis(12usize),
1432 self.mis(13usize),
1433 self.mis(14usize),
1434 self.mis(15usize),
1435 ],
1436 };
1437 defmt::write!(f, "{}", proxy)
1438 }
1439 }
1440 #[doc = "LPDMA privileged configuration register"]
1441 #[repr(transparent)]
1442 #[derive(Copy, Clone, Eq, PartialEq)]
1443 pub struct Privcfgr(pub u32);
1444 impl Privcfgr {
1445 #[doc = "PRIV0"]
1446 #[inline(always)]
1447 pub const fn priv_(&self, n: usize) -> bool {
1448 assert!(n < 16usize);
1449 let offs = 0usize + n * 1usize;
1450 let val = (self.0 >> offs) & 0x01;
1451 val != 0
1452 }
1453 #[doc = "PRIV0"]
1454 #[inline(always)]
1455 pub fn set_priv_(&mut self, n: usize, val: bool) {
1456 assert!(n < 16usize);
1457 let offs = 0usize + n * 1usize;
1458 self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs);
1459 }
1460 }
1461 impl Default for Privcfgr {
1462 #[inline(always)]
1463 fn default() -> Privcfgr {
1464 Privcfgr(0)
1465 }
1466 }
1467 impl core::fmt::Debug for Privcfgr {
1468 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1469 f.debug_struct("Privcfgr")
1470 .field(
1471 "priv_",
1472 &[
1473 self.priv_(0usize),
1474 self.priv_(1usize),
1475 self.priv_(2usize),
1476 self.priv_(3usize),
1477 self.priv_(4usize),
1478 self.priv_(5usize),
1479 self.priv_(6usize),
1480 self.priv_(7usize),
1481 self.priv_(8usize),
1482 self.priv_(9usize),
1483 self.priv_(10usize),
1484 self.priv_(11usize),
1485 self.priv_(12usize),
1486 self.priv_(13usize),
1487 self.priv_(14usize),
1488 self.priv_(15usize),
1489 ],
1490 )
1491 .finish()
1492 }
1493 }
1494 #[cfg(feature = "defmt")]
1495 impl defmt::Format for Privcfgr {
1496 fn format(&self, f: defmt::Formatter) {
1497 #[derive(defmt :: Format)]
1498 struct Privcfgr {
1499 priv_: [bool; 16usize],
1500 }
1501 let proxy = Privcfgr {
1502 priv_: [
1503 self.priv_(0usize),
1504 self.priv_(1usize),
1505 self.priv_(2usize),
1506 self.priv_(3usize),
1507 self.priv_(4usize),
1508 self.priv_(5usize),
1509 self.priv_(6usize),
1510 self.priv_(7usize),
1511 self.priv_(8usize),
1512 self.priv_(9usize),
1513 self.priv_(10usize),
1514 self.priv_(11usize),
1515 self.priv_(12usize),
1516 self.priv_(13usize),
1517 self.priv_(14usize),
1518 self.priv_(15usize),
1519 ],
1520 };
1521 defmt::write!(f, "{}", proxy)
1522 }
1523 }
1524 #[doc = "LPDMA configuration lock register"]
1525 #[repr(transparent)]
1526 #[derive(Copy, Clone, Eq, PartialEq)]
1527 pub struct Rcfglockr(pub u32);
1528 impl Rcfglockr {
1529 #[doc = "LOCK0"]
1530 #[inline(always)]
1531 pub const fn lock(&self, n: usize) -> bool {
1532 assert!(n < 16usize);
1533 let offs = 0usize + n * 1usize;
1534 let val = (self.0 >> offs) & 0x01;
1535 val != 0
1536 }
1537 #[doc = "LOCK0"]
1538 #[inline(always)]
1539 pub fn set_lock(&mut self, n: usize, val: bool) {
1540 assert!(n < 16usize);
1541 let offs = 0usize + n * 1usize;
1542 self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs);
1543 }
1544 }
1545 impl Default for Rcfglockr {
1546 #[inline(always)]
1547 fn default() -> Rcfglockr {
1548 Rcfglockr(0)
1549 }
1550 }
1551 impl core::fmt::Debug for Rcfglockr {
1552 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1553 f.debug_struct("Rcfglockr")
1554 .field(
1555 "lock",
1556 &[
1557 self.lock(0usize),
1558 self.lock(1usize),
1559 self.lock(2usize),
1560 self.lock(3usize),
1561 self.lock(4usize),
1562 self.lock(5usize),
1563 self.lock(6usize),
1564 self.lock(7usize),
1565 self.lock(8usize),
1566 self.lock(9usize),
1567 self.lock(10usize),
1568 self.lock(11usize),
1569 self.lock(12usize),
1570 self.lock(13usize),
1571 self.lock(14usize),
1572 self.lock(15usize),
1573 ],
1574 )
1575 .finish()
1576 }
1577 }
1578 #[cfg(feature = "defmt")]
1579 impl defmt::Format for Rcfglockr {
1580 fn format(&self, f: defmt::Formatter) {
1581 #[derive(defmt :: Format)]
1582 struct Rcfglockr {
1583 lock: [bool; 16usize],
1584 }
1585 let proxy = Rcfglockr {
1586 lock: [
1587 self.lock(0usize),
1588 self.lock(1usize),
1589 self.lock(2usize),
1590 self.lock(3usize),
1591 self.lock(4usize),
1592 self.lock(5usize),
1593 self.lock(6usize),
1594 self.lock(7usize),
1595 self.lock(8usize),
1596 self.lock(9usize),
1597 self.lock(10usize),
1598 self.lock(11usize),
1599 self.lock(12usize),
1600 self.lock(13usize),
1601 self.lock(14usize),
1602 self.lock(15usize),
1603 ],
1604 };
1605 defmt::write!(f, "{}", proxy)
1606 }
1607 }
1608 #[doc = "LPDMA secure configuration register"]
1609 #[repr(transparent)]
1610 #[derive(Copy, Clone, Eq, PartialEq)]
1611 pub struct Seccfgr(pub u32);
1612 impl Seccfgr {
1613 #[doc = "SEC0"]
1614 #[inline(always)]
1615 pub const fn sec(&self, n: usize) -> bool {
1616 assert!(n < 16usize);
1617 let offs = 0usize + n * 1usize;
1618 let val = (self.0 >> offs) & 0x01;
1619 val != 0
1620 }
1621 #[doc = "SEC0"]
1622 #[inline(always)]
1623 pub fn set_sec(&mut self, n: usize, val: bool) {
1624 assert!(n < 16usize);
1625 let offs = 0usize + n * 1usize;
1626 self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs);
1627 }
1628 }
1629 impl Default for Seccfgr {
1630 #[inline(always)]
1631 fn default() -> Seccfgr {
1632 Seccfgr(0)
1633 }
1634 }
1635 impl core::fmt::Debug for Seccfgr {
1636 fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
1637 f.debug_struct("Seccfgr")
1638 .field(
1639 "sec",
1640 &[
1641 self.sec(0usize),
1642 self.sec(1usize),
1643 self.sec(2usize),
1644 self.sec(3usize),
1645 self.sec(4usize),
1646 self.sec(5usize),
1647 self.sec(6usize),
1648 self.sec(7usize),
1649 self.sec(8usize),
1650 self.sec(9usize),
1651 self.sec(10usize),
1652 self.sec(11usize),
1653 self.sec(12usize),
1654 self.sec(13usize),
1655 self.sec(14usize),
1656 self.sec(15usize),
1657 ],
1658 )
1659 .finish()
1660 }
1661 }
1662 #[cfg(feature = "defmt")]
1663 impl defmt::Format for Seccfgr {
1664 fn format(&self, f: defmt::Formatter) {
1665 #[derive(defmt :: Format)]
1666 struct Seccfgr {
1667 sec: [bool; 16usize],
1668 }
1669 let proxy = Seccfgr {
1670 sec: [
1671 self.sec(0usize),
1672 self.sec(1usize),
1673 self.sec(2usize),
1674 self.sec(3usize),
1675 self.sec(4usize),
1676 self.sec(5usize),
1677 self.sec(6usize),
1678 self.sec(7usize),
1679 self.sec(8usize),
1680 self.sec(9usize),
1681 self.sec(10usize),
1682 self.sec(11usize),
1683 self.sec(12usize),
1684 self.sec(13usize),
1685 self.sec(14usize),
1686 self.sec(15usize),
1687 ],
1688 };
1689 defmt::write!(f, "{}", proxy)
1690 }
1691 }
1692}
1693pub mod vals {
1694 #[repr(u8)]
1695 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1696 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1697 pub enum Breq {
1698 #[doc = "the selected hardware request is driven by a peripheral with a hardware request/acknowledge protocol at a burst level."]
1699 BURST = 0x0,
1700 #[doc = "the selected hardware request is driven by a peripheral with a hardware request/acknowledge protocol at a block level (see )."]
1701 BLOCK = 0x01,
1702 }
1703 impl Breq {
1704 #[inline(always)]
1705 pub const fn from_bits(val: u8) -> Breq {
1706 unsafe { core::mem::transmute(val & 0x01) }
1707 }
1708 #[inline(always)]
1709 pub const fn to_bits(self) -> u8 {
1710 unsafe { core::mem::transmute(self) }
1711 }
1712 }
1713 impl From<u8> for Breq {
1714 #[inline(always)]
1715 fn from(val: u8) -> Breq {
1716 Breq::from_bits(val)
1717 }
1718 }
1719 impl From<Breq> for u8 {
1720 #[inline(always)]
1721 fn from(val: Breq) -> u8 {
1722 Breq::to_bits(val)
1723 }
1724 }
1725 #[repr(u8)]
1726 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1727 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1728 pub enum Dec {
1729 #[doc = "The address is incremented by the programmed offset."]
1730 ADD = 0x0,
1731 #[doc = "The address is decremented by the programmed offset."]
1732 SUBTRACT = 0x01,
1733 }
1734 impl Dec {
1735 #[inline(always)]
1736 pub const fn from_bits(val: u8) -> Dec {
1737 unsafe { core::mem::transmute(val & 0x01) }
1738 }
1739 #[inline(always)]
1740 pub const fn to_bits(self) -> u8 {
1741 unsafe { core::mem::transmute(self) }
1742 }
1743 }
1744 impl From<u8> for Dec {
1745 #[inline(always)]
1746 fn from(val: u8) -> Dec {
1747 Dec::from_bits(val)
1748 }
1749 }
1750 impl From<Dec> for u8 {
1751 #[inline(always)]
1752 fn from(val: Dec) -> u8 {
1753 Dec::to_bits(val)
1754 }
1755 }
1756 #[repr(u8)]
1757 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1758 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1759 pub enum Dreq {
1760 #[doc = "selected hardware request driven by a source peripheral (request signal taken into account by the LPDMA transfer scheduler over the source/read port)"]
1761 SOURCE_PERIPHERAL = 0x0,
1762 #[doc = "selected hardware request driven by a destination peripheral (request signal taken into account by the LPDMA transfer scheduler over the destination/write port)"]
1763 DESTINATION_PERIPHERAL = 0x01,
1764 }
1765 impl Dreq {
1766 #[inline(always)]
1767 pub const fn from_bits(val: u8) -> Dreq {
1768 unsafe { core::mem::transmute(val & 0x01) }
1769 }
1770 #[inline(always)]
1771 pub const fn to_bits(self) -> u8 {
1772 unsafe { core::mem::transmute(self) }
1773 }
1774 }
1775 impl From<u8> for Dreq {
1776 #[inline(always)]
1777 fn from(val: u8) -> Dreq {
1778 Dreq::from_bits(val)
1779 }
1780 }
1781 impl From<Dreq> for u8 {
1782 #[inline(always)]
1783 fn from(val: Dreq) -> u8 {
1784 Dreq::to_bits(val)
1785 }
1786 }
1787 #[repr(u8)]
1788 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1789 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1790 pub enum Dw {
1791 #[doc = "byte"]
1792 BYTE = 0x0,
1793 #[doc = "half-word (2 bytes)"]
1794 HALF_WORD = 0x01,
1795 #[doc = "word (4 bytes)"]
1796 WORD = 0x02,
1797 _RESERVED_3 = 0x03,
1798 }
1799 impl Dw {
1800 #[inline(always)]
1801 pub const fn from_bits(val: u8) -> Dw {
1802 unsafe { core::mem::transmute(val & 0x03) }
1803 }
1804 #[inline(always)]
1805 pub const fn to_bits(self) -> u8 {
1806 unsafe { core::mem::transmute(self) }
1807 }
1808 }
1809 impl From<u8> for Dw {
1810 #[inline(always)]
1811 fn from(val: u8) -> Dw {
1812 Dw::from_bits(val)
1813 }
1814 }
1815 impl From<Dw> for u8 {
1816 #[inline(always)]
1817 fn from(val: Dw) -> u8 {
1818 Dw::to_bits(val)
1819 }
1820 }
1821 #[repr(u8)]
1822 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1823 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1824 pub enum Lsm {
1825 #[doc = "channel executed for the full linked-list and completed at the end of the last LLI (CH\\[x\\].LLR = 0). The 16 low-significant bits of the link address are null (LA\\[15:0\\]
1826= 0) and all the update bits are null (UT1 =UB1 = UT2 = USA = UDA = ULL = 0 and UT3 = UB2 = 0 if present). Then CH\\[x\\].BR1.BNDT\\[15:0\\]
1827= 0 and CH\\[x\\].BR1.BRC\\[10:0\\]
1828= 0 if present."]
1829 RUN_TO_COMPLETION = 0x0,
1830 #[doc = "channel executed once for the current LLI"]
1831 LINK_STEP = 0x01,
1832 }
1833 impl Lsm {
1834 #[inline(always)]
1835 pub const fn from_bits(val: u8) -> Lsm {
1836 unsafe { core::mem::transmute(val & 0x01) }
1837 }
1838 #[inline(always)]
1839 pub const fn to_bits(self) -> u8 {
1840 unsafe { core::mem::transmute(self) }
1841 }
1842 }
1843 impl From<u8> for Lsm {
1844 #[inline(always)]
1845 fn from(val: u8) -> Lsm {
1846 Lsm::from_bits(val)
1847 }
1848 }
1849 impl From<Lsm> for u8 {
1850 #[inline(always)]
1851 fn from(val: Lsm) -> u8 {
1852 Lsm::to_bits(val)
1853 }
1854 }
1855 #[repr(u8)]
1856 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1857 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1858 pub enum Pam {
1859 #[doc = "If destination is wider: source data is transferred as right aligned, padded with 0s up to the destination data width If source is wider: source data is transferred as right aligned, left-truncated down to the destination data width"]
1860 ZERO_EXTEND_OR_LEFT_TRUNCATE = 0x0,
1861 #[doc = "If destination is wider: source data is transferred as right aligned, sign extended up to the destination data width If source is wider: source data is transferred as left-aligned, right-truncated down to the destination data width"]
1862 SIGN_EXTEND_OR_RIGHT_TRUNCATE = 0x01,
1863 #[doc = "source data is FIFO queued and packed/unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination"]
1864 PACK = 0x02,
1865 _RESERVED_3 = 0x03,
1866 }
1867 impl Pam {
1868 #[inline(always)]
1869 pub const fn from_bits(val: u8) -> Pam {
1870 unsafe { core::mem::transmute(val & 0x03) }
1871 }
1872 #[inline(always)]
1873 pub const fn to_bits(self) -> u8 {
1874 unsafe { core::mem::transmute(self) }
1875 }
1876 }
1877 impl From<u8> for Pam {
1878 #[inline(always)]
1879 fn from(val: u8) -> Pam {
1880 Pam::from_bits(val)
1881 }
1882 }
1883 impl From<Pam> for u8 {
1884 #[inline(always)]
1885 fn from(val: Pam) -> u8 {
1886 Pam::to_bits(val)
1887 }
1888 }
1889 #[repr(u8)]
1890 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1891 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1892 pub enum Prio {
1893 #[doc = "low priority, low weight"]
1894 LOW_WITH_LOWH_WEIGHT = 0x0,
1895 #[doc = "low priority, mid weight"]
1896 LOW_WITH_MID_WEIGHT = 0x01,
1897 #[doc = "low priority, high weight"]
1898 LOW_WITH_HIGH_WEIGHT = 0x02,
1899 #[doc = "high priority"]
1900 HIGH = 0x03,
1901 }
1902 impl Prio {
1903 #[inline(always)]
1904 pub const fn from_bits(val: u8) -> Prio {
1905 unsafe { core::mem::transmute(val & 0x03) }
1906 }
1907 #[inline(always)]
1908 pub const fn to_bits(self) -> u8 {
1909 unsafe { core::mem::transmute(self) }
1910 }
1911 }
1912 impl From<u8> for Prio {
1913 #[inline(always)]
1914 fn from(val: u8) -> Prio {
1915 Prio::from_bits(val)
1916 }
1917 }
1918 impl From<Prio> for u8 {
1919 #[inline(always)]
1920 fn from(val: Prio) -> u8 {
1921 Prio::to_bits(val)
1922 }
1923 }
1924 #[repr(u8)]
1925 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1926 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1927 pub enum Swreq {
1928 #[doc = "no software request. The selected hardware request REQSEL\\[6:0\\]
1929is taken into account."]
1930 HARDWARE = 0x0,
1931 #[doc = "software request for a memory-to-memory transfer. The default selected hardware request as per REQSEL\\[6:0\\]
1932is ignored."]
1933 SOFTWARE = 0x01,
1934 }
1935 impl Swreq {
1936 #[inline(always)]
1937 pub const fn from_bits(val: u8) -> Swreq {
1938 unsafe { core::mem::transmute(val & 0x01) }
1939 }
1940 #[inline(always)]
1941 pub const fn to_bits(self) -> u8 {
1942 unsafe { core::mem::transmute(self) }
1943 }
1944 }
1945 impl From<u8> for Swreq {
1946 #[inline(always)]
1947 fn from(val: u8) -> Swreq {
1948 Swreq::from_bits(val)
1949 }
1950 }
1951 impl From<Swreq> for u8 {
1952 #[inline(always)]
1953 fn from(val: Swreq) -> u8 {
1954 Swreq::to_bits(val)
1955 }
1956 }
1957 #[repr(u8)]
1958 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1959 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1960 pub enum Tcem {
1961 #[doc = "at block level (when CH\\[x\\].BR1.BNDT\\[15:0\\]
1962= 0): the complete (and the half) transfer event is generated at the (respectively half of the) end of a block."]
1963 EACH_BLOCK = 0x0,
1964 #[doc = "channel x = 0 to 11, same as 00; channel x=12 to 15, at 2D/repeated block level (when CH\\[x\\].BR1.BRC\\[10:0\\]
1965= 0 and CH\\[x\\].BR1.BNDT\\[15:0\\]
1966= 0), the complete (and the half) transfer event is generated at the end (respectively half of the end) of the 2D/repeated block."]
1967 EACH2DBLOCK = 0x01,
1968 #[doc = "at LLI level: the complete transfer event is generated at the end of the LLI transfer, including the update of the LLI if any. The half transfer event is generated at the half of the LLI data transfer (the LLI data transfer being a block transfer or a 2D/repeated block transfer for channel x = 12 to 15), if any data transfer."]
1969 EACH_LINKED_LIST_ITEM = 0x02,
1970 #[doc = "at channel level: the complete transfer event is generated at the end of the last LLI transfer. The half transfer event is generated at the half of the data transfer of the last LLI. The last LLI updates the link address CH\\[x\\].LLR.LA\\[15:2\\]
1971to zero and clears all the CH\\[x\\].LLR update bits (UT1, UT2, UB1, USA, UDA and ULL, plus UT3 and UB2 if present). If the channel transfer is continuous/infinite, no event is generated."]
1972 LAST_LINKED_LIST_ITEM = 0x03,
1973 }
1974 impl Tcem {
1975 #[inline(always)]
1976 pub const fn from_bits(val: u8) -> Tcem {
1977 unsafe { core::mem::transmute(val & 0x03) }
1978 }
1979 #[inline(always)]
1980 pub const fn to_bits(self) -> u8 {
1981 unsafe { core::mem::transmute(self) }
1982 }
1983 }
1984 impl From<u8> for Tcem {
1985 #[inline(always)]
1986 fn from(val: u8) -> Tcem {
1987 Tcem::from_bits(val)
1988 }
1989 }
1990 impl From<Tcem> for u8 {
1991 #[inline(always)]
1992 fn from(val: Tcem) -> u8 {
1993 Tcem::to_bits(val)
1994 }
1995 }
1996 #[repr(u8)]
1997 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1998 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
1999 pub enum Trigm {
2000 #[doc = "at block level: the first burst read of each block transfer is conditioned by one hit trigger (channel x = 12 to 15, for each block if a 2D/repeated block is configured with CH\\[x\\].BR1.BRC\\[10:0\\]
2001≠ 0)."]
2002 BLOCK = 0x0,
2003 #[doc = "channel x = 0 to 11, same as 00; channel x=12 to 15, at 2D/repeated block level, the"]
2004 _2DBLOCK = 0x01,
2005 #[doc = "at link level: a LLI link transfer is conditioned by one hit trigger. The LLI data transfer (if any) is not conditioned."]
2006 LINKED_LIST_ITEM = 0x02,
2007 #[doc = "at programmed burst level: If SWREQ = 1, each programmed burst read is conditioned by one hit trigger. If SWREQ = 0, each programmed burst that is requested by the selected peripheral, is conditioned by one hit trigger."]
2008 BURST = 0x03,
2009 }
2010 impl Trigm {
2011 #[inline(always)]
2012 pub const fn from_bits(val: u8) -> Trigm {
2013 unsafe { core::mem::transmute(val & 0x03) }
2014 }
2015 #[inline(always)]
2016 pub const fn to_bits(self) -> u8 {
2017 unsafe { core::mem::transmute(self) }
2018 }
2019 }
2020 impl From<u8> for Trigm {
2021 #[inline(always)]
2022 fn from(val: u8) -> Trigm {
2023 Trigm::from_bits(val)
2024 }
2025 }
2026 impl From<Trigm> for u8 {
2027 #[inline(always)]
2028 fn from(val: Trigm) -> u8 {
2029 Trigm::to_bits(val)
2030 }
2031 }
2032 #[repr(u8)]
2033 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2034 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
2035 pub enum Trigpol {
2036 #[doc = "no trigger (masked trigger event)"]
2037 NONE = 0x0,
2038 #[doc = "trigger on the rising edge"]
2039 RISING_EDGE = 0x01,
2040 #[doc = "trigger on the falling edge"]
2041 FALLING_EDGE = 0x02,
2042 #[doc = "same as 00"]
2043 NONE_ALT = 0x03,
2044 }
2045 impl Trigpol {
2046 #[inline(always)]
2047 pub const fn from_bits(val: u8) -> Trigpol {
2048 unsafe { core::mem::transmute(val & 0x03) }
2049 }
2050 #[inline(always)]
2051 pub const fn to_bits(self) -> u8 {
2052 unsafe { core::mem::transmute(self) }
2053 }
2054 }
2055 impl From<u8> for Trigpol {
2056 #[inline(always)]
2057 fn from(val: u8) -> Trigpol {
2058 Trigpol::from_bits(val)
2059 }
2060 }
2061 impl From<Trigpol> for u8 {
2062 #[inline(always)]
2063 fn from(val: Trigpol) -> u8 {
2064 Trigpol::to_bits(val)
2065 }
2066 }
2067}
2068