1 | #![allow (clippy::missing_safety_doc)] |
2 | #![allow (clippy::identity_op)] |
3 | #![allow (clippy::unnecessary_cast)] |
4 | #![allow (clippy::erasing_op)] |
5 | |
/// Handle to one GPDMA channel's register block.
///
/// Wraps the channel's base address; the accessor methods on `impl Channel`
/// return typed registers at fixed offsets from this base.
#[derive (Copy, Clone, Eq, PartialEq)]
pub struct Channel {
    ptr: *mut u8,
}
// SAFETY: `Channel` is just a raw pointer to a memory-mapped register block;
// copying or sharing the handle across threads does not itself access the
// hardware. NOTE(review): presumably all register accesses go through
// volatile operations in `crate::common::Reg` — confirm this matches the
// crate-wide convention for peripheral handles.
unsafe impl Send for Channel {}
unsafe impl Sync for Channel {}
12 | impl Channel { |
13 | #[inline (always)] |
14 | pub const unsafe fn from_ptr(ptr: *mut ()) -> Self { |
15 | Self { ptr: ptr as _ } |
16 | } |
17 | #[inline (always)] |
18 | pub const fn as_ptr(&self) -> *mut () { |
19 | self.ptr as _ |
20 | } |
21 | #[doc = "GPDMA channel 15 linked-list base address register" ] |
22 | #[inline (always)] |
23 | pub const fn lbar(self) -> crate::common::Reg<regs::ChLbar, crate::common::RW> { |
24 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0usize) as _) } |
25 | } |
26 | #[doc = "GPDMA channel 15 flag clear register" ] |
27 | #[inline (always)] |
28 | pub const fn fcr(self) -> crate::common::Reg<regs::ChFcr, crate::common::RW> { |
29 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0cusize) as _) } |
30 | } |
31 | #[doc = "GPDMA channel 15 status register" ] |
32 | #[inline (always)] |
33 | pub const fn sr(self) -> crate::common::Reg<regs::ChSr, crate::common::RW> { |
34 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x10usize) as _) } |
35 | } |
36 | #[doc = "GPDMA channel 15 control register" ] |
37 | #[inline (always)] |
38 | pub const fn cr(self) -> crate::common::Reg<regs::ChCr, crate::common::RW> { |
39 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x14usize) as _) } |
40 | } |
41 | #[doc = "GPDMA channel 15 transfer register 1" ] |
42 | #[inline (always)] |
43 | pub const fn tr1(self) -> crate::common::Reg<regs::ChTr1, crate::common::RW> { |
44 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x40usize) as _) } |
45 | } |
46 | #[doc = "GPDMA channel 15 transfer register 2" ] |
47 | #[inline (always)] |
48 | pub const fn tr2(self) -> crate::common::Reg<regs::ChTr2, crate::common::RW> { |
49 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x44usize) as _) } |
50 | } |
51 | #[doc = "GPDMA channel 15 alternate block register 1" ] |
52 | #[inline (always)] |
53 | pub const fn br1(self) -> crate::common::Reg<regs::ChBr1, crate::common::RW> { |
54 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x48usize) as _) } |
55 | } |
56 | #[doc = "GPDMA channel 15 source address register" ] |
57 | #[inline (always)] |
58 | pub const fn sar(self) -> crate::common::Reg<u32, crate::common::RW> { |
59 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x4cusize) as _) } |
60 | } |
61 | #[doc = "GPDMA channel 15 destination address register" ] |
62 | #[inline (always)] |
63 | pub const fn dar(self) -> crate::common::Reg<u32, crate::common::RW> { |
64 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x50usize) as _) } |
65 | } |
66 | #[doc = "GPDMA channel 15 transfer register 3" ] |
67 | #[inline (always)] |
68 | pub const fn tr3(self) -> crate::common::Reg<regs::ChTr3, crate::common::RW> { |
69 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x54usize) as _) } |
70 | } |
71 | #[doc = "GPDMA channel 15 block register 2" ] |
72 | #[inline (always)] |
73 | pub const fn br2(self) -> crate::common::Reg<regs::ChBr2, crate::common::RW> { |
74 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x58usize) as _) } |
75 | } |
76 | #[doc = "GPDMA channel 15 alternate linked-list address register" ] |
77 | #[inline (always)] |
78 | pub const fn llr(self) -> crate::common::Reg<regs::ChLlr, crate::common::RW> { |
79 | unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x7cusize) as _) } |
80 | } |
81 | } |
#[doc = "GPDMA" ]
// Handle to the GPDMA peripheral's register block; holds the peripheral base
// address, from which per-register and per-channel accessors compute offsets.
#[derive (Copy, Clone, Eq, PartialEq)]
pub struct Gpdma {
    ptr: *mut u8,
}
// SAFETY: `Gpdma` is just a raw pointer to a memory-mapped register block;
// copying or sharing the handle across threads does not itself access the
// hardware. NOTE(review): presumably register accesses go through volatile
// operations in `crate::common::Reg` — confirm against the crate convention.
unsafe impl Send for Gpdma {}
unsafe impl Sync for Gpdma {}
impl Gpdma {
    /// Creates a `Gpdma` handle from the peripheral's base address.
    ///
    /// # Safety
    /// `ptr` must be the valid, correctly aligned base address of the GPDMA
    /// peripheral's register block for the lifetime of the returned handle.
    #[inline (always)]
    pub const unsafe fn from_ptr(ptr: *mut ()) -> Self {
        Self { ptr: ptr as _ }
    }
    /// Returns the raw base pointer of the peripheral's register block.
    #[inline (always)]
    pub const fn as_ptr(&self) -> *mut () {
        self.ptr as _
    }
    #[doc = "GPDMA secure configuration register" ]
    #[inline (always)]
    pub const fn seccfgr(self) -> crate::common::Reg<regs::Seccfgr, crate::common::RW> {
        unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0usize) as _) }
    }
    #[doc = "GPDMA privileged configuration register" ]
    #[inline (always)]
    pub const fn privcfgr(self) -> crate::common::Reg<regs::Privcfgr, crate::common::RW> {
        unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x04usize) as _) }
    }
    #[doc = "GPDMA configuration lock register" ]
    #[inline (always)]
    pub const fn rcfglockr(self) -> crate::common::Reg<regs::Rcfglockr, crate::common::RW> {
        unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x08usize) as _) }
    }
    #[doc = "GPDMA non-secure masked interrupt status register" ]
    #[inline (always)]
    pub const fn misr(self) -> crate::common::Reg<regs::Misr, crate::common::RW> {
        unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x0cusize) as _) }
    }
    #[doc = "GPDMA secure masked interrupt status register" ]
    #[inline (always)]
    pub const fn smisr(self) -> crate::common::Reg<regs::Misr, crate::common::RW> {
        unsafe { crate::common::Reg::from_ptr(self.ptr.add(0x10usize) as _) }
    }
    /// Returns the register-block handle for GPDMA channel `n`.
    ///
    /// Channel blocks start at offset 0x50 from the peripheral base and are
    /// spaced 0x80 (128) bytes apart.
    ///
    /// # Panics
    /// Panics if `n >= 16`.
    #[inline (always)]
    pub const fn ch(self, n: usize) -> Channel {
        assert!(n < 16usize);
        unsafe { Channel::from_ptr(self.ptr.add(0x50usize + n * 128usize) as _) }
    }
}
129 | pub mod regs { |
130 | #[doc = "GPDMA channel 15 alternate block register 1" ] |
131 | #[repr (transparent)] |
132 | #[derive (Copy, Clone, Eq, PartialEq)] |
133 | pub struct ChBr1(pub u32); |
134 | impl ChBr1 { |
135 | #[doc = "block number of data bytes to transfer from the source. Block size transferred from the source. When the channel is enabled, this field becomes read-only and is decremented, indicating the remaining number of data items in the current source block to be transferred. BNDT \\[15:0 \\] |
136 | is programmed in number of bytes, maximum source block size is 64 Kbytes -1. Once the last data transfer is completed (BNDT \\[15:0 \\] |
137 | = 0): - if CH \\[x \\].LLR.UB1 = 1, this field is updated by the LLI in the memory. - if CH \\[x \\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. - if all CH \\[x \\].LLR.Uxx = 0 and if CH \\[x \\].LLR.LA \\[15:0 \\] |
138 | ≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). - if CH \\[x \\].LLR = 0, this field is kept as zero following the last LLI data transfer. Note: A non-null source block size must be a multiple of the source data width (BNDT \\[2:0 \\] |
139 | versus CH \\[x \\].TR1.SDW_LOG2 \\[1:0 \\]). Else a user setting error is reported and no transfer is issued. When configured in packing mode (CH \\[x \\].TR1.PAM \\[1 \\]=1 and destination data width different from source data width), a non-null source block size must be a multiple of the destination data width (BNDT \\[2:0 \\] |
140 | versus CH \\[x \\].TR1.DDW \\[1:0 \\]). Else a user setting error is reported and no transfer is issued." ] |
141 | #[inline (always)] |
142 | pub const fn bndt(&self) -> u16 { |
143 | let val = (self.0 >> 0usize) & 0xffff; |
144 | val as u16 |
145 | } |
146 | #[doc = "block number of data bytes to transfer from the source. Block size transferred from the source. When the channel is enabled, this field becomes read-only and is decremented, indicating the remaining number of data items in the current source block to be transferred. BNDT \\[15:0 \\] |
147 | is programmed in number of bytes, maximum source block size is 64 Kbytes -1. Once the last data transfer is completed (BNDT \\[15:0 \\] |
148 | = 0): - if CH \\[x \\].LLR.UB1 = 1, this field is updated by the LLI in the memory. - if CH \\[x \\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. - if all CH \\[x \\].LLR.Uxx = 0 and if CH \\[x \\].LLR.LA \\[15:0 \\] |
149 | ≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). - if CH \\[x \\].LLR = 0, this field is kept as zero following the last LLI data transfer. Note: A non-null source block size must be a multiple of the source data width (BNDT \\[2:0 \\] |
150 | versus CH \\[x \\].TR1.SDW_LOG2 \\[1:0 \\]). Else a user setting error is reported and no transfer is issued. When configured in packing mode (CH \\[x \\].TR1.PAM \\[1 \\]=1 and destination data width different from source data width), a non-null source block size must be a multiple of the destination data width (BNDT \\[2:0 \\] |
151 | versus CH \\[x \\].TR1.DDW \\[1:0 \\]). Else a user setting error is reported and no transfer is issued." ] |
152 | #[inline (always)] |
153 | pub fn set_bndt(&mut self, val: u16) { |
154 | self.0 = (self.0 & !(0xffff << 0usize)) | (((val as u32) & 0xffff) << 0usize); |
155 | } |
156 | #[doc = "Block repeat counter. This field contains the number of repetitions of the current block (0 to 2047). When the channel is enabled, this field becomes read-only. After decrements, this field indicates the remaining number of blocks, excluding the current one. This counter is hardware decremented for each completed block transfer. Once the last block transfer is completed (BRC \\[10:0 \\] |
157 | = BNDT \\[15:0 \\] |
158 | = 0): If CH \\[x \\].LLR.UB1 = 1, all CH \\[x \\].BR1 fields are updated by the next LLI in the memory. If CH \\[x \\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. if all CH \\[x \\].LLR.Uxx = 0 and if CH \\[x \\].LLR.LA \\[15:0 \\] |
159 | ≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). if CH \\[x \\].LLR = 0, this field is kept as zero following the last LLI and data transfer." ] |
160 | #[inline (always)] |
161 | pub const fn brc(&self) -> u16 { |
162 | let val = (self.0 >> 16usize) & 0x07ff; |
163 | val as u16 |
164 | } |
165 | #[doc = "Block repeat counter. This field contains the number of repetitions of the current block (0 to 2047). When the channel is enabled, this field becomes read-only. After decrements, this field indicates the remaining number of blocks, excluding the current one. This counter is hardware decremented for each completed block transfer. Once the last block transfer is completed (BRC \\[10:0 \\] |
166 | = BNDT \\[15:0 \\] |
167 | = 0): If CH \\[x \\].LLR.UB1 = 1, all CH \\[x \\].BR1 fields are updated by the next LLI in the memory. If CH \\[x \\].LLR.UB1 = 0 and if there is at least one not null Uxx update bit, this field is internally restored to the programmed value. if all CH \\[x \\].LLR.Uxx = 0 and if CH \\[x \\].LLR.LA \\[15:0 \\] |
168 | ≠ 0, this field is internally restored to the programmed value (infinite/continuous last LLI). if CH \\[x \\].LLR = 0, this field is kept as zero following the last LLI and data transfer." ] |
169 | #[inline (always)] |
170 | pub fn set_brc(&mut self, val: u16) { |
171 | self.0 = (self.0 & !(0x07ff << 16usize)) | (((val as u32) & 0x07ff) << 16usize); |
172 | } |
173 | #[doc = "source address decrement" ] |
174 | #[inline (always)] |
175 | pub const fn sdec(&self) -> super::vals::Dec { |
176 | let val = (self.0 >> 28usize) & 0x01; |
177 | super::vals::Dec::from_bits(val as u8) |
178 | } |
179 | #[doc = "source address decrement" ] |
180 | #[inline (always)] |
181 | pub fn set_sdec(&mut self, val: super::vals::Dec) { |
182 | self.0 = (self.0 & !(0x01 << 28usize)) | (((val.to_bits() as u32) & 0x01) << 28usize); |
183 | } |
184 | #[doc = "destination address decrement" ] |
185 | #[inline (always)] |
186 | pub const fn ddec(&self) -> super::vals::Dec { |
187 | let val = (self.0 >> 29usize) & 0x01; |
188 | super::vals::Dec::from_bits(val as u8) |
189 | } |
190 | #[doc = "destination address decrement" ] |
191 | #[inline (always)] |
192 | pub fn set_ddec(&mut self, val: super::vals::Dec) { |
193 | self.0 = (self.0 & !(0x01 << 29usize)) | (((val.to_bits() as u32) & 0x01) << 29usize); |
194 | } |
195 | #[doc = "Block repeat source address decrement. Note: On top of this increment/decrement (depending on BRSDEC), CH \\[x \\].SAR is in the same time also updated by the increment/decrement (depending on SDEC) of the CH \\[x \\].TR3.SAO value, as it is done after any programmed burst transfer." ] |
196 | #[inline (always)] |
197 | pub const fn brsdec(&self) -> super::vals::Dec { |
198 | let val = (self.0 >> 30usize) & 0x01; |
199 | super::vals::Dec::from_bits(val as u8) |
200 | } |
201 | #[doc = "Block repeat source address decrement. Note: On top of this increment/decrement (depending on BRSDEC), CH \\[x \\].SAR is in the same time also updated by the increment/decrement (depending on SDEC) of the CH \\[x \\].TR3.SAO value, as it is done after any programmed burst transfer." ] |
202 | #[inline (always)] |
203 | pub fn set_brsdec(&mut self, val: super::vals::Dec) { |
204 | self.0 = (self.0 & !(0x01 << 30usize)) | (((val.to_bits() as u32) & 0x01) << 30usize); |
205 | } |
206 | #[doc = "Block repeat destination address decrement. Note: On top of this increment/decrement (depending on BRDDEC), CH \\[x \\].DAR is in the same time also updated by the increment/decrement (depending on DDEC) of the CH \\[x \\].TR3.DAO value, as it is usually done at the end of each programmed burst transfer." ] |
207 | #[inline (always)] |
208 | pub const fn brddec(&self) -> super::vals::Dec { |
209 | let val = (self.0 >> 31usize) & 0x01; |
210 | super::vals::Dec::from_bits(val as u8) |
211 | } |
212 | #[doc = "Block repeat destination address decrement. Note: On top of this increment/decrement (depending on BRDDEC), CH \\[x \\].DAR is in the same time also updated by the increment/decrement (depending on DDEC) of the CH \\[x \\].TR3.DAO value, as it is usually done at the end of each programmed burst transfer." ] |
213 | #[inline (always)] |
214 | pub fn set_brddec(&mut self, val: super::vals::Dec) { |
215 | self.0 = (self.0 & !(0x01 << 31usize)) | (((val.to_bits() as u32) & 0x01) << 31usize); |
216 | } |
217 | } |
218 | impl Default for ChBr1 { |
219 | #[inline (always)] |
220 | fn default() -> ChBr1 { |
221 | ChBr1(0) |
222 | } |
223 | } |
224 | impl core::fmt::Debug for ChBr1 { |
225 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
226 | f.debug_struct("ChBr1" ) |
227 | .field("bndt" , &self.bndt()) |
228 | .field("brc" , &self.brc()) |
229 | .field("sdec" , &self.sdec()) |
230 | .field("ddec" , &self.ddec()) |
231 | .field("brsdec" , &self.brsdec()) |
232 | .field("brddec" , &self.brddec()) |
233 | .finish() |
234 | } |
235 | } |
236 | #[cfg (feature = "defmt" )] |
237 | impl defmt::Format for ChBr1 { |
238 | fn format(&self, f: defmt::Formatter) { |
239 | #[derive (defmt :: Format)] |
240 | struct ChBr1 { |
241 | bndt: u16, |
242 | brc: u16, |
243 | sdec: super::vals::Dec, |
244 | ddec: super::vals::Dec, |
245 | brsdec: super::vals::Dec, |
246 | brddec: super::vals::Dec, |
247 | } |
248 | let proxy = ChBr1 { |
249 | bndt: self.bndt(), |
250 | brc: self.brc(), |
251 | sdec: self.sdec(), |
252 | ddec: self.ddec(), |
253 | brsdec: self.brsdec(), |
254 | brddec: self.brddec(), |
255 | }; |
256 | defmt::write!(f, "{}" , proxy) |
257 | } |
258 | } |
#[doc = "GPDMA channel x block register 2"]
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChBr2(pub u32);
impl ChBr2 {
    #[doc = "Block repeated source address offset (2D addressing): added to or subtracted from the current source address CH[x].SAR (per CH[x].BR1.BRSDEC) at the end of each block transfer. Must be aligned with the programmed source burst data width, else a user setting error is reported and no transfer is issued."]
    #[inline(always)]
    pub const fn brsao(&self) -> u16 {
        let val = (self.0 >> 0usize) & 0xffff;
        val as u16
    }
    #[doc = "Block repeated source address offset (2D addressing): added to or subtracted from the current source address CH[x].SAR (per CH[x].BR1.BRSDEC) at the end of each block transfer. Must be aligned with the programmed source burst data width, else a user setting error is reported and no transfer is issued."]
    #[inline(always)]
    pub fn set_brsao(&mut self, val: u16) {
        self.0 = (self.0 & !(0xffff << 0usize)) | (((val as u32) & 0xffff) << 0usize);
    }
    #[doc = "Block repeated destination address offset (2D addressing): added to or subtracted from the current destination address CH[x].DAR (per CH[x].BR1.BRDDEC) at the end of each block transfer. Must be aligned with the programmed destination burst data width, else a user setting error is reported and no transfer is issued."]
    #[inline(always)]
    pub const fn brdao(&self) -> u16 {
        let val = (self.0 >> 16usize) & 0xffff;
        val as u16
    }
    #[doc = "Block repeated destination address offset (2D addressing): added to or subtracted from the current destination address CH[x].DAR (per CH[x].BR1.BRDDEC) at the end of each block transfer. Must be aligned with the programmed destination burst data width, else a user setting error is reported and no transfer is issued."]
    #[inline(always)]
    pub fn set_brdao(&mut self, val: u16) {
        self.0 = (self.0 & !(0xffff << 16usize)) | (((val as u32) & 0xffff) << 16usize);
    }
}
impl Default for ChBr2 {
    #[inline(always)]
    fn default() -> ChBr2 {
        ChBr2(0)
    }
}
impl core::fmt::Debug for ChBr2 {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        f.debug_struct("ChBr2")
            .field("brsao", &self.brsao())
            .field("brdao", &self.brdao())
            .finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChBr2 {
    fn format(&self, f: defmt::Formatter) {
        // Proxy struct lets defmt derive the wire format from the decoded fields.
        #[derive(defmt::Format)]
        struct ChBr2 {
            brsao: u16,
            brdao: u16,
        }
        let proxy = ChBr2 {
            brsao: self.brsao(),
            brdao: self.brdao(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
320 | #[doc = "GPDMA channel 11 control register" ] |
321 | #[repr (transparent)] |
322 | #[derive (Copy, Clone, Eq, PartialEq)] |
323 | pub struct ChCr(pub u32); |
324 | impl ChCr { |
325 | #[doc = "enable. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 0. Else: this bit is de-asserted by hardware when there is a transfer error (master bus error or user setting error) or when there is a channel transfer complete (channel ready to be configured, e.g. if LSM=1 at the end of a single execution of the LLI). Else, this bit can be asserted by software. Writing 0 into this EN bit is ignored." ] |
326 | #[inline (always)] |
327 | pub const fn en(&self) -> bool { |
328 | let val = (self.0 >> 0usize) & 0x01; |
329 | val != 0 |
330 | } |
331 | #[doc = "enable. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 0. Else: this bit is de-asserted by hardware when there is a transfer error (master bus error or user setting error) or when there is a channel transfer complete (channel ready to be configured, e.g. if LSM=1 at the end of a single execution of the LLI). Else, this bit can be asserted by software. Writing 0 into this EN bit is ignored." ] |
332 | #[inline (always)] |
333 | pub fn set_en(&mut self, val: bool) { |
334 | self.0 = (self.0 & !(0x01 << 0usize)) | (((val as u32) & 0x01) << 0usize); |
335 | } |
336 | #[doc = "reset. This bit is write only. Writing 0 has no impact. Writing 1 implies the reset of the following: the FIFO, the channel internal state, SUSP and EN bits (whatever is written receptively in bit 2 and bit 0). The reset is effective when the channel is in steady state, meaning one of the following: - active channel in suspended state (CH \\[x \\].SR.SUSPF = 1 and CH \\[x \\].SR.IDLEF = CH \\[x \\].CR.EN = 1). - channel in disabled state (CH \\[x \\].SR.IDLEF = 1 and CH \\[x \\].CR.EN = 0). After writing a RESET, to continue using this channel, the user must explicitly reconfigure the channel including the hardware-modified configuration registers (CH \\[x \\].BR1, CH \\[x \\].SAR and CH \\[x \\].DAR) before enabling again the channel (see the programming sequence in )." ] |
337 | #[inline (always)] |
338 | pub const fn reset(&self) -> bool { |
339 | let val = (self.0 >> 1usize) & 0x01; |
340 | val != 0 |
341 | } |
342 | #[doc = "reset. This bit is write only. Writing 0 has no impact. Writing 1 implies the reset of the following: the FIFO, the channel internal state, SUSP and EN bits (whatever is written receptively in bit 2 and bit 0). The reset is effective when the channel is in steady state, meaning one of the following: - active channel in suspended state (CH \\[x \\].SR.SUSPF = 1 and CH \\[x \\].SR.IDLEF = CH \\[x \\].CR.EN = 1). - channel in disabled state (CH \\[x \\].SR.IDLEF = 1 and CH \\[x \\].CR.EN = 0). After writing a RESET, to continue using this channel, the user must explicitly reconfigure the channel including the hardware-modified configuration registers (CH \\[x \\].BR1, CH \\[x \\].SAR and CH \\[x \\].DAR) before enabling again the channel (see the programming sequence in )." ] |
343 | #[inline (always)] |
344 | pub fn set_reset(&mut self, val: bool) { |
345 | self.0 = (self.0 & !(0x01 << 1usize)) | (((val as u32) & 0x01) << 1usize); |
346 | } |
347 | #[doc = "suspend. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 2. Else: Software must write 1 in order to suspend an active channel i.e. a channel with an on-going GPDMA transfer over its master ports. The software must write 0 in order to resume a suspended channel, following the programming sequence detailed in ." ] |
348 | #[inline (always)] |
349 | pub const fn susp(&self) -> bool { |
350 | let val = (self.0 >> 2usize) & 0x01; |
351 | val != 0 |
352 | } |
353 | #[doc = "suspend. Writing 1 into the field RESET (bit 1) causes the hardware to de-assert this bit, whatever is written into this bit 2. Else: Software must write 1 in order to suspend an active channel i.e. a channel with an on-going GPDMA transfer over its master ports. The software must write 0 in order to resume a suspended channel, following the programming sequence detailed in ." ] |
354 | #[inline (always)] |
355 | pub fn set_susp(&mut self, val: bool) { |
356 | self.0 = (self.0 & !(0x01 << 2usize)) | (((val as u32) & 0x01) << 2usize); |
357 | } |
358 | #[doc = "transfer complete interrupt enable" ] |
359 | #[inline (always)] |
360 | pub const fn tcie(&self) -> bool { |
361 | let val = (self.0 >> 8usize) & 0x01; |
362 | val != 0 |
363 | } |
364 | #[doc = "transfer complete interrupt enable" ] |
365 | #[inline (always)] |
366 | pub fn set_tcie(&mut self, val: bool) { |
367 | self.0 = (self.0 & !(0x01 << 8usize)) | (((val as u32) & 0x01) << 8usize); |
368 | } |
369 | #[doc = "half transfer complete interrupt enable" ] |
370 | #[inline (always)] |
371 | pub const fn htie(&self) -> bool { |
372 | let val = (self.0 >> 9usize) & 0x01; |
373 | val != 0 |
374 | } |
375 | #[doc = "half transfer complete interrupt enable" ] |
376 | #[inline (always)] |
377 | pub fn set_htie(&mut self, val: bool) { |
378 | self.0 = (self.0 & !(0x01 << 9usize)) | (((val as u32) & 0x01) << 9usize); |
379 | } |
380 | #[doc = "data transfer error interrupt enable" ] |
381 | #[inline (always)] |
382 | pub const fn dteie(&self) -> bool { |
383 | let val = (self.0 >> 10usize) & 0x01; |
384 | val != 0 |
385 | } |
386 | #[doc = "data transfer error interrupt enable" ] |
387 | #[inline (always)] |
388 | pub fn set_dteie(&mut self, val: bool) { |
389 | self.0 = (self.0 & !(0x01 << 10usize)) | (((val as u32) & 0x01) << 10usize); |
390 | } |
391 | #[doc = "update link transfer error interrupt enable" ] |
392 | #[inline (always)] |
393 | pub const fn uleie(&self) -> bool { |
394 | let val = (self.0 >> 11usize) & 0x01; |
395 | val != 0 |
396 | } |
397 | #[doc = "update link transfer error interrupt enable" ] |
398 | #[inline (always)] |
399 | pub fn set_uleie(&mut self, val: bool) { |
400 | self.0 = (self.0 & !(0x01 << 11usize)) | (((val as u32) & 0x01) << 11usize); |
401 | } |
402 | #[doc = "user setting error interrupt enable" ] |
403 | #[inline (always)] |
404 | pub const fn useie(&self) -> bool { |
405 | let val = (self.0 >> 12usize) & 0x01; |
406 | val != 0 |
407 | } |
408 | #[doc = "user setting error interrupt enable" ] |
409 | #[inline (always)] |
410 | pub fn set_useie(&mut self, val: bool) { |
411 | self.0 = (self.0 & !(0x01 << 12usize)) | (((val as u32) & 0x01) << 12usize); |
412 | } |
413 | #[doc = "completed suspension interrupt enable" ] |
414 | #[inline (always)] |
415 | pub const fn suspie(&self) -> bool { |
416 | let val = (self.0 >> 13usize) & 0x01; |
417 | val != 0 |
418 | } |
419 | #[doc = "completed suspension interrupt enable" ] |
420 | #[inline (always)] |
421 | pub fn set_suspie(&mut self, val: bool) { |
422 | self.0 = (self.0 & !(0x01 << 13usize)) | (((val as u32) & 0x01) << 13usize); |
423 | } |
424 | #[doc = "trigger overrun interrupt enable" ] |
425 | #[inline (always)] |
426 | pub const fn toie(&self) -> bool { |
427 | let val = (self.0 >> 14usize) & 0x01; |
428 | val != 0 |
429 | } |
430 | #[doc = "trigger overrun interrupt enable" ] |
431 | #[inline (always)] |
432 | pub fn set_toie(&mut self, val: bool) { |
433 | self.0 = (self.0 & !(0x01 << 14usize)) | (((val as u32) & 0x01) << 14usize); |
434 | } |
435 | #[doc = "Link step mode. First the (possible 1D/repeated) block transfer is executed as defined by the current internal register file until CH \\[x \\].BR1.BNDT \\[15:0 \\] |
436 | = 0 and CH \\[x \\].BR1.BRC \\[10:0 \\] |
437 | = 0 if present. Secondly the next linked-list data structure is conditionally uploaded from memory as defined by CH \\[x \\].LLR. Then channel execution is completed. Note: This bit must be written when EN=0. This bit is read-only when EN=1." ] |
438 | #[inline (always)] |
439 | pub const fn lsm(&self) -> super::vals::Lsm { |
440 | let val = (self.0 >> 16usize) & 0x01; |
441 | super::vals::Lsm::from_bits(val as u8) |
442 | } |
443 | #[doc = "Link step mode. First the (possible 1D/repeated) block transfer is executed as defined by the current internal register file until CH \\[x \\].BR1.BNDT \\[15:0 \\] |
444 | = 0 and CH \\[x \\].BR1.BRC \\[10:0 \\] |
445 | = 0 if present. Secondly the next linked-list data structure is conditionally uploaded from memory as defined by CH \\[x \\].LLR. Then channel execution is completed. Note: This bit must be written when EN=0. This bit is read-only when EN=1." ] |
446 | #[inline (always)] |
447 | pub fn set_lsm(&mut self, val: super::vals::Lsm) { |
448 | self.0 = (self.0 & !(0x01 << 16usize)) | (((val.to_bits() as u32) & 0x01) << 16usize); |
449 | } |
450 | #[doc = "linked-list allocated port. This bit is used to allocate the master port for the update of the GPDMA linked-list registers from the memory. Note: This bit must be written when EN=0. This bit is read-only when EN=1." ] |
451 | #[inline (always)] |
452 | pub const fn lap(&self) -> super::vals::Ap { |
453 | let val = (self.0 >> 17usize) & 0x01; |
454 | super::vals::Ap::from_bits(val as u8) |
455 | } |
456 | #[doc = "linked-list allocated port. This bit is used to allocate the master port for the update of the GPDMA linked-list registers from the memory. Note: This bit must be written when EN=0. This bit is read-only when EN=1." ] |
457 | #[inline (always)] |
458 | pub fn set_lap(&mut self, val: super::vals::Ap) { |
459 | self.0 = (self.0 & !(0x01 << 17usize)) | (((val.to_bits() as u32) & 0x01) << 17usize); |
460 | } |
461 | #[doc = "priority level of the channel x GPDMA transfer versus others. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
462 | #[inline (always)] |
463 | pub const fn prio(&self) -> super::vals::Prio { |
464 | let val = (self.0 >> 22usize) & 0x03; |
465 | super::vals::Prio::from_bits(val as u8) |
466 | } |
467 | #[doc = "priority level of the channel x GPDMA transfer versus others. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
468 | #[inline (always)] |
469 | pub fn set_prio(&mut self, val: super::vals::Prio) { |
470 | self.0 = (self.0 & !(0x03 << 22usize)) | (((val.to_bits() as u32) & 0x03) << 22usize); |
471 | } |
472 | } |
473 | impl Default for ChCr { |
474 | #[inline (always)] |
475 | fn default() -> ChCr { |
476 | ChCr(0) |
477 | } |
478 | } |
479 | impl core::fmt::Debug for ChCr { |
480 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
481 | f.debug_struct("ChCr" ) |
482 | .field("en" , &self.en()) |
483 | .field("reset" , &self.reset()) |
484 | .field("susp" , &self.susp()) |
485 | .field("tcie" , &self.tcie()) |
486 | .field("htie" , &self.htie()) |
487 | .field("dteie" , &self.dteie()) |
488 | .field("uleie" , &self.uleie()) |
489 | .field("useie" , &self.useie()) |
490 | .field("suspie" , &self.suspie()) |
491 | .field("toie" , &self.toie()) |
492 | .field("lsm" , &self.lsm()) |
493 | .field("lap" , &self.lap()) |
494 | .field("prio" , &self.prio()) |
495 | .finish() |
496 | } |
497 | } |
498 | #[cfg (feature = "defmt" )] |
499 | impl defmt::Format for ChCr { |
500 | fn format(&self, f: defmt::Formatter) { |
501 | #[derive (defmt :: Format)] |
502 | struct ChCr { |
503 | en: bool, |
504 | reset: bool, |
505 | susp: bool, |
506 | tcie: bool, |
507 | htie: bool, |
508 | dteie: bool, |
509 | uleie: bool, |
510 | useie: bool, |
511 | suspie: bool, |
512 | toie: bool, |
513 | lsm: super::vals::Lsm, |
514 | lap: super::vals::Ap, |
515 | prio: super::vals::Prio, |
516 | } |
517 | let proxy = ChCr { |
518 | en: self.en(), |
519 | reset: self.reset(), |
520 | susp: self.susp(), |
521 | tcie: self.tcie(), |
522 | htie: self.htie(), |
523 | dteie: self.dteie(), |
524 | uleie: self.uleie(), |
525 | useie: self.useie(), |
526 | suspie: self.suspie(), |
527 | toie: self.toie(), |
528 | lsm: self.lsm(), |
529 | lap: self.lap(), |
530 | prio: self.prio(), |
531 | }; |
532 | defmt::write!(f, "{}" , proxy) |
533 | } |
534 | } |
/// GPDMA channel x flag clear register.
///
/// Write-1-to-clear view of the channel event flags; each accessor
/// manipulates a single bit of the underlying `u32` word.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChFcr(pub u32);
impl ChFcr {
    /// transfer complete flag clear (bit 8)
    #[inline(always)]
    pub const fn tcf(&self) -> bool {
        self.0 & (1 << 8) != 0
    }
    /// transfer complete flag clear (bit 8)
    #[inline(always)]
    pub fn set_tcf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 8;
        } else {
            self.0 &= !(1 << 8);
        }
    }
    /// half transfer flag clear (bit 9)
    #[inline(always)]
    pub const fn htf(&self) -> bool {
        self.0 & (1 << 9) != 0
    }
    /// half transfer flag clear (bit 9)
    #[inline(always)]
    pub fn set_htf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 9;
        } else {
            self.0 &= !(1 << 9);
        }
    }
    /// data transfer error flag clear (bit 10)
    #[inline(always)]
    pub const fn dtef(&self) -> bool {
        self.0 & (1 << 10) != 0
    }
    /// data transfer error flag clear (bit 10)
    #[inline(always)]
    pub fn set_dtef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 10;
        } else {
            self.0 &= !(1 << 10);
        }
    }
    /// update link transfer error flag clear (bit 11)
    #[inline(always)]
    pub const fn ulef(&self) -> bool {
        self.0 & (1 << 11) != 0
    }
    /// update link transfer error flag clear (bit 11)
    #[inline(always)]
    pub fn set_ulef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 11;
        } else {
            self.0 &= !(1 << 11);
        }
    }
    /// user setting error flag clear (bit 12)
    #[inline(always)]
    pub const fn usef(&self) -> bool {
        self.0 & (1 << 12) != 0
    }
    /// user setting error flag clear (bit 12)
    #[inline(always)]
    pub fn set_usef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 12;
        } else {
            self.0 &= !(1 << 12);
        }
    }
    /// completed suspension flag clear (bit 13)
    #[inline(always)]
    pub const fn suspf(&self) -> bool {
        self.0 & (1 << 13) != 0
    }
    /// completed suspension flag clear (bit 13)
    #[inline(always)]
    pub fn set_suspf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 13;
        } else {
            self.0 &= !(1 << 13);
        }
    }
    /// trigger overrun flag clear (bit 14)
    #[inline(always)]
    pub const fn tof(&self) -> bool {
        self.0 & (1 << 14) != 0
    }
    /// trigger overrun flag clear (bit 14)
    #[inline(always)]
    pub fn set_tof(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 14;
        } else {
            self.0 &= !(1 << 14);
        }
    }
}
impl Default for ChFcr {
    /// All-zero reset value.
    #[inline(always)]
    fn default() -> ChFcr {
        Self(0)
    }
}
impl core::fmt::Debug for ChFcr {
    // Render decoded bitfields rather than the raw word.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut d = f.debug_struct("ChFcr");
        d.field("tcf", &self.tcf());
        d.field("htf", &self.htf());
        d.field("dtef", &self.dtef());
        d.field("ulef", &self.ulef());
        d.field("usef", &self.usef());
        d.field("suspf", &self.suspf());
        d.field("tof", &self.tof());
        d.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChFcr {
    fn format(&self, f: defmt::Formatter) {
        // Plain-field proxy so the derived defmt formatter logs decoded
        // flags by name instead of the packed u32.
        #[derive(defmt::Format)]
        struct ChFcr {
            tcf: bool,
            htf: bool,
            dtef: bool,
            ulef: bool,
            usef: bool,
            suspf: bool,
            tof: bool,
        }
        let proxy = ChFcr {
            tcf: self.tcf(),
            htf: self.htf(),
            dtef: self.dtef(),
            ulef: self.ulef(),
            usef: self.usef(),
            suspf: self.suspf(),
            tof: self.tof(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
/// GPDMA channel x linked-list base address register.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChLbar(pub u32);
impl ChLbar {
    /// linked-list base address of GPDMA channel x (upper 16 bits of the word)
    #[inline(always)]
    pub const fn lba(&self) -> u16 {
        (self.0 >> 16) as u16
    }
    /// linked-list base address of GPDMA channel x (upper 16 bits of the word)
    #[inline(always)]
    pub fn set_lba(&mut self, val: u16) {
        // A u16 already fits the 16-bit field, so no extra masking is needed.
        self.0 = (self.0 & 0x0000_ffff) | (u32::from(val) << 16);
    }
}
impl Default for ChLbar {
    /// All-zero reset value.
    #[inline(always)]
    fn default() -> ChLbar {
        Self(0)
    }
}
impl core::fmt::Debug for ChLbar {
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut d = f.debug_struct("ChLbar");
        d.field("lba", &self.lba());
        d.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChLbar {
    fn format(&self, f: defmt::Formatter) {
        // Plain-field proxy so the derived defmt formatter logs the decoded
        // field instead of the packed u32.
        #[derive(defmt::Format)]
        struct ChLbar {
            lba: u16,
        }
        let proxy = ChLbar { lba: self.lba() };
        defmt::write!(f, "{}", proxy)
    }
}
/// GPDMA channel x linked-list address register.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChLlr(pub u32);
impl ChLlr {
    /// Pointer (16-bit low-significant address, 32-bit aligned) to the next
    /// linked-list data structure, stored in bits 15:2. If all U* update bits
    /// are 0 and LA is 0, the current LLI is the last one. The two
    /// low-significant address bits are write-ignored (field occupies bits 15:2).
    #[inline(always)]
    pub const fn la(&self) -> u16 {
        ((self.0 >> 2) & 0x3fff) as u16
    }
    /// Pointer (16-bit low-significant address, 32-bit aligned) to the next
    /// linked-list data structure, stored in bits 15:2. If all U* update bits
    /// are 0 and LA is 0, the current LLI is the last one. The two
    /// low-significant address bits are write-ignored (field occupies bits 15:2).
    #[inline(always)]
    pub fn set_la(&mut self, val: u16) {
        // Only 14 bits are kept; the value is masked before insertion.
        self.0 = (self.0 & !(0x3fff << 2)) | ((u32::from(val) & 0x3fff) << 2);
    }
    /// Update CH\[x\].LLR register from memory during the link transfer (bit 16).
    #[inline(always)]
    pub const fn ull(&self) -> bool {
        self.0 & (1 << 16) != 0
    }
    /// Update CH\[x\].LLR register from memory during the link transfer (bit 16).
    #[inline(always)]
    pub fn set_ull(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 16;
        } else {
            self.0 &= !(1 << 16);
        }
    }
    /// Update CH\[x\].BR2 from memory during the link transfer (bit 25).
    #[inline(always)]
    pub const fn ub2(&self) -> bool {
        self.0 & (1 << 25) != 0
    }
    /// Update CH\[x\].BR2 from memory during the link transfer (bit 25).
    #[inline(always)]
    pub fn set_ub2(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 25;
        } else {
            self.0 &= !(1 << 25);
        }
    }
    /// Update CH\[x\].TR3 from memory during the link transfer (bit 26).
    #[inline(always)]
    pub const fn ut3(&self) -> bool {
        self.0 & (1 << 26) != 0
    }
    /// Update CH\[x\].TR3 from memory during the link transfer (bit 26).
    #[inline(always)]
    pub fn set_ut3(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 26;
        } else {
            self.0 &= !(1 << 26);
        }
    }
    /// Update CH\[x\].DAR from memory during the link transfer (bit 27).
    #[inline(always)]
    pub const fn uda(&self) -> bool {
        self.0 & (1 << 27) != 0
    }
    /// Update CH\[x\].DAR from memory during the link transfer (bit 27).
    #[inline(always)]
    pub fn set_uda(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 27;
        } else {
            self.0 &= !(1 << 27);
        }
    }
    /// Update CH\[x\].SAR from memory during the link transfer (bit 28).
    #[inline(always)]
    pub const fn usa(&self) -> bool {
        self.0 & (1 << 28) != 0
    }
    /// Update CH\[x\].SAR from memory during the link transfer (bit 28).
    #[inline(always)]
    pub fn set_usa(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 28;
        } else {
            self.0 &= !(1 << 28);
        }
    }
    /// Update CH\[x\].BR1 from memory during the link transfer (bit 29).
    /// If UB1 = 0 and CH\[x\].LLR != 0, BNDT is restored to the programmed
    /// value after the data transfer completes and before the link transfer.
    #[inline(always)]
    pub const fn ub1(&self) -> bool {
        self.0 & (1 << 29) != 0
    }
    /// Update CH\[x\].BR1 from memory during the link transfer (bit 29).
    /// If UB1 = 0 and CH\[x\].LLR != 0, BNDT is restored to the programmed
    /// value after the data transfer completes and before the link transfer.
    #[inline(always)]
    pub fn set_ub1(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 29;
        } else {
            self.0 &= !(1 << 29);
        }
    }
    /// Update CH\[x\].TR2 from memory during the link transfer (bit 30).
    #[inline(always)]
    pub const fn ut2(&self) -> bool {
        self.0 & (1 << 30) != 0
    }
    /// Update CH\[x\].TR2 from memory during the link transfer (bit 30).
    #[inline(always)]
    pub fn set_ut2(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 30;
        } else {
            self.0 &= !(1 << 30);
        }
    }
    /// Update CH\[x\].TR1 from memory during the link transfer (bit 31).
    #[inline(always)]
    pub const fn ut1(&self) -> bool {
        self.0 & (1 << 31) != 0
    }
    /// Update CH\[x\].TR1 from memory during the link transfer (bit 31).
    #[inline(always)]
    pub fn set_ut1(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 31;
        } else {
            self.0 &= !(1 << 31);
        }
    }
}
impl Default for ChLlr {
    /// All-zero reset value.
    #[inline(always)]
    fn default() -> ChLlr {
        Self(0)
    }
}
impl core::fmt::Debug for ChLlr {
    // Render decoded bitfields rather than the raw word.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut d = f.debug_struct("ChLlr");
        d.field("la", &self.la());
        d.field("ull", &self.ull());
        d.field("ub2", &self.ub2());
        d.field("ut3", &self.ut3());
        d.field("uda", &self.uda());
        d.field("usa", &self.usa());
        d.field("ub1", &self.ub1());
        d.field("ut2", &self.ut2());
        d.field("ut1", &self.ut1());
        d.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChLlr {
    fn format(&self, f: defmt::Formatter) {
        // Plain-field proxy so the derived defmt formatter logs decoded
        // fields by name instead of the packed u32.
        #[derive(defmt::Format)]
        struct ChLlr {
            la: u16,
            ull: bool,
            ub2: bool,
            ut3: bool,
            uda: bool,
            usa: bool,
            ub1: bool,
            ut2: bool,
            ut1: bool,
        }
        let proxy = ChLlr {
            la: self.la(),
            ull: self.ull(),
            ub2: self.ub2(),
            ut3: self.ut3(),
            uda: self.uda(),
            usa: self.usa(),
            ub1: self.ub1(),
            ut2: self.ut2(),
            ut1: self.ut1(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
/// GPDMA channel x status register.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChSr(pub u32);
impl ChSr {
    /// idle flag (bit 0). De-asserted by hardware when the channel is enabled
    /// with a valid configuration; asserted after hard reset or when the
    /// channel is back in idle state (suspended or disabled).
    #[inline(always)]
    pub const fn idlef(&self) -> bool {
        self.0 & 1 != 0
    }
    /// idle flag (bit 0). De-asserted by hardware when the channel is enabled
    /// with a valid configuration; asserted after hard reset or when the
    /// channel is back in idle state (suspended or disabled).
    #[inline(always)]
    pub fn set_idlef(&mut self, val: bool) {
        if val {
            self.0 |= 1;
        } else {
            self.0 &= !1;
        }
    }
    /// transfer complete flag (bit 8). The event is a block, 2D/repeated
    /// block, LLI, or full linked-list completion depending on
    /// CH\[x\].TR2.TCEM\[1:0\].
    #[inline(always)]
    pub const fn tcf(&self) -> bool {
        self.0 & (1 << 8) != 0
    }
    /// transfer complete flag (bit 8). The event is a block, 2D/repeated
    /// block, LLI, or full linked-list completion depending on
    /// CH\[x\].TR2.TCEM\[1:0\].
    #[inline(always)]
    pub fn set_tcf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 8;
        } else {
            self.0 &= !(1 << 8);
        }
    }
    /// half transfer flag (bit 9). Set at half of the block (or half of the
    /// repeated blocks) depending on CH\[x\].TR2.TCEM\[1:0\].
    #[inline(always)]
    pub const fn htf(&self) -> bool {
        self.0 & (1 << 9) != 0
    }
    /// half transfer flag (bit 9). Set at half of the block (or half of the
    /// repeated blocks) depending on CH\[x\].TR2.TCEM\[1:0\].
    #[inline(always)]
    pub fn set_htf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 9;
        } else {
            self.0 &= !(1 << 9);
        }
    }
    /// data transfer error flag (bit 10)
    #[inline(always)]
    pub const fn dtef(&self) -> bool {
        self.0 & (1 << 10) != 0
    }
    /// data transfer error flag (bit 10)
    #[inline(always)]
    pub fn set_dtef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 10;
        } else {
            self.0 &= !(1 << 10);
        }
    }
    /// update link transfer error flag (bit 11)
    #[inline(always)]
    pub const fn ulef(&self) -> bool {
        self.0 & (1 << 11) != 0
    }
    /// update link transfer error flag (bit 11)
    #[inline(always)]
    pub fn set_ulef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 11;
        } else {
            self.0 &= !(1 << 11);
        }
    }
    /// user setting error flag (bit 12)
    #[inline(always)]
    pub const fn usef(&self) -> bool {
        self.0 & (1 << 12) != 0
    }
    /// user setting error flag (bit 12)
    #[inline(always)]
    pub fn set_usef(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 12;
        } else {
            self.0 &= !(1 << 12);
        }
    }
    /// completed suspension flag (bit 13)
    #[inline(always)]
    pub const fn suspf(&self) -> bool {
        self.0 & (1 << 13) != 0
    }
    /// completed suspension flag (bit 13)
    #[inline(always)]
    pub fn set_suspf(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 13;
        } else {
            self.0 &= !(1 << 13);
        }
    }
    /// trigger overrun flag (bit 14)
    #[inline(always)]
    pub const fn tof(&self) -> bool {
        self.0 & (1 << 14) != 0
    }
    /// trigger overrun flag (bit 14)
    #[inline(always)]
    pub fn set_tof(&mut self, val: bool) {
        if val {
            self.0 |= 1 << 14;
        } else {
            self.0 &= !(1 << 14);
        }
    }
    /// monitored FIFO level (bits 23:16). Number of available write beats in
    /// the FIFO, in units of the programmed destination data width
    /// (CH\[x\].TR1.DDW\[1:0\]). After suspending a transfer, read this
    /// (once CH\[x\].SR.SUSPF = 1) together with CH\[x\].BR1 to know how much
    /// data reached the destination.
    #[inline(always)]
    pub const fn fifol(&self) -> u8 {
        (self.0 >> 16) as u8
    }
    /// monitored FIFO level (bits 23:16). Number of available write beats in
    /// the FIFO, in units of the programmed destination data width
    /// (CH\[x\].TR1.DDW\[1:0\]). After suspending a transfer, read this
    /// (once CH\[x\].SR.SUSPF = 1) together with CH\[x\].BR1 to know how much
    /// data reached the destination.
    #[inline(always)]
    pub fn set_fifol(&mut self, val: u8) {
        // A u8 already fits the 8-bit field, so no extra masking is needed.
        self.0 = (self.0 & !(0xff << 16)) | (u32::from(val) << 16);
    }
}
impl Default for ChSr {
    /// All-zero reset value.
    #[inline(always)]
    fn default() -> ChSr {
        Self(0)
    }
}
impl core::fmt::Debug for ChSr {
    // Render decoded bitfields rather than the raw word.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut d = f.debug_struct("ChSr");
        d.field("idlef", &self.idlef());
        d.field("tcf", &self.tcf());
        d.field("htf", &self.htf());
        d.field("dtef", &self.dtef());
        d.field("ulef", &self.ulef());
        d.field("usef", &self.usef());
        d.field("suspf", &self.suspf());
        d.field("tof", &self.tof());
        d.field("fifol", &self.fifol());
        d.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChSr {
    fn format(&self, f: defmt::Formatter) {
        // Plain-field proxy so the derived defmt formatter logs decoded
        // fields by name instead of the packed u32.
        #[derive(defmt::Format)]
        struct ChSr {
            idlef: bool,
            tcf: bool,
            htf: bool,
            dtef: bool,
            ulef: bool,
            usef: bool,
            suspf: bool,
            tof: bool,
            fifol: u8,
        }
        let proxy = ChSr {
            idlef: self.idlef(),
            tcf: self.tcf(),
            htf: self.htf(),
            dtef: self.dtef(),
            ulef: self.ulef(),
            usef: self.usef(),
            suspf: self.suspf(),
            tof: self.tof(),
            fifol: self.fifol(),
        };
        defmt::write!(f, "{}", proxy)
    }
}
1017 | #[doc = "GPDMA channel 8 transfer register 1" ] |
1018 | #[repr (transparent)] |
1019 | #[derive (Copy, Clone, Eq, PartialEq)] |
1020 | pub struct ChTr1(pub u32); |
1021 | impl ChTr1 { |
1022 | #[doc = "binary logarithm of the source data width of a burst in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and no transfer is issued. A source block size must be a multiple of the source data width (CH \\[x \\].BR1.BNDT \\[2:0 \\] |
1023 | versus SDW_LOG2 \\[1:0 \\]). Otherwise, a user setting error is reported and no transfer is issued. A source single transfer must have an aligned address with its data width (start address CH \\[x \\].SAR \\[2:0 \\] |
1024 | versus SDW_LOG2 \\[1:0 \\]). Otherwise, a user setting error is reported and none transfer is issued." ] |
1025 | #[inline (always)] |
1026 | pub const fn sdw(&self) -> super::vals::Dw { |
1027 | let val = (self.0 >> 0usize) & 0x03; |
1028 | super::vals::Dw::from_bits(val as u8) |
1029 | } |
1030 | #[doc = "binary logarithm of the source data width of a burst in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and no transfer is issued. A source block size must be a multiple of the source data width (CH \\[x \\].BR1.BNDT \\[2:0 \\] |
1031 | versus SDW_LOG2 \\[1:0 \\]). Otherwise, a user setting error is reported and no transfer is issued. A source single transfer must have an aligned address with its data width (start address CH \\[x \\].SAR \\[2:0 \\] |
1032 | versus SDW_LOG2 \\[1:0 \\]). Otherwise, a user setting error is reported and none transfer is issued." ] |
1033 | #[inline (always)] |
1034 | pub fn set_sdw(&mut self, val: super::vals::Dw) { |
1035 | self.0 = (self.0 & !(0x03 << 0usize)) | (((val.to_bits() as u32) & 0x03) << 0usize); |
1036 | } |
1037 | #[doc = "source incrementing burst. The source address, pointed by CH \\[x \\].SAR, is kept constant after a burst beat/single transfer or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer." ] |
1038 | #[inline (always)] |
1039 | pub const fn sinc(&self) -> bool { |
1040 | let val = (self.0 >> 3usize) & 0x01; |
1041 | val != 0 |
1042 | } |
1043 | #[doc = "source incrementing burst. The source address, pointed by CH \\[x \\].SAR, is kept constant after a burst beat/single transfer or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer." ] |
1044 | #[inline (always)] |
1045 | pub fn set_sinc(&mut self, val: bool) { |
1046 | self.0 = (self.0 & !(0x01 << 3usize)) | (((val as u32) & 0x01) << 3usize); |
1047 | } |
1048 | #[doc = "source burst length minus 1, between 0 and 63. The burst length unit is one data named beat within a burst. If SBL_1 \\[5:0 \\] |
1049 | =0 , the burst can be named as single. Each data/beat has a width defined by the destination data width SDW_LOG2 \\[1:0 \\]. Note: If a burst transfer crossed a 1-Kbyte address boundary on a AHB transfer, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the AHB protocol. If a burst transfer is of length greater than the FIFO size of the channel x, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the FIFO size. Transfer performance is lower, with GPDMA re-arbitration between effective and lower bursts/singles, but the data integrity is guaranteed." ] |
1050 | #[inline (always)] |
1051 | pub const fn sbl_1(&self) -> u8 { |
1052 | let val = (self.0 >> 4usize) & 0x3f; |
1053 | val as u8 |
1054 | } |
1055 | #[doc = "source burst length minus 1, between 0 and 63. The burst length unit is one data named beat within a burst. If SBL_1 \\[5:0 \\] |
1056 | =0 , the burst can be named as single. Each data/beat has a width defined by the destination data width SDW_LOG2 \\[1:0 \\]. Note: If a burst transfer crossed a 1-Kbyte address boundary on a AHB transfer, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the AHB protocol. If a burst transfer is of length greater than the FIFO size of the channel x, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the FIFO size. Transfer performance is lower, with GPDMA re-arbitration between effective and lower bursts/singles, but the data integrity is guaranteed." ] |
1057 | #[inline (always)] |
1058 | pub fn set_sbl_1(&mut self, val: u8) { |
1059 | self.0 = (self.0 & !(0x3f << 4usize)) | (((val as u32) & 0x3f) << 4usize); |
1060 | } |
1061 | #[doc = "padding/alignment mode. If DDW \\[1:0 \\] |
1062 | = SDW_LOG2 \\[1:0 \\]: if the data width of a burst destination transfer is equal to the data width of a burst source transfer, these bits are ignored. Else: - Case 1: If destination data width > source data width. 1x: successive source data are FIFO queued and packed at the destination data width, in a left (LSB) to right (MSB) order (named little endian), before a destination transfer. - Case 2: If destination data width < source data width. 1x: source data is FIFO queued and unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination. Note:" ] |
1063 | #[inline (always)] |
1064 | pub const fn pam(&self) -> super::vals::Pam { |
1065 | let val = (self.0 >> 11usize) & 0x03; |
1066 | super::vals::Pam::from_bits(val as u8) |
1067 | } |
1068 | #[doc = "padding/alignment mode. If DDW \\[1:0 \\] |
1069 | = SDW_LOG2 \\[1:0 \\]: if the data width of a burst destination transfer is equal to the data width of a burst source transfer, these bits are ignored. Else: - Case 1: If destination data width > source data width. 1x: successive source data are FIFO queued and packed at the destination data width, in a left (LSB) to right (MSB) order (named little endian), before a destination transfer. - Case 2: If destination data width < source data width. 1x: source data is FIFO queued and unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination. Note:" ] |
1070 | #[inline (always)] |
1071 | pub fn set_pam(&mut self, val: super::vals::Pam) { |
1072 | self.0 = (self.0 & !(0x03 << 11usize)) | (((val.to_bits() as u32) & 0x03) << 11usize); |
1073 | } |
1074 | #[doc = "source byte exchange within the unaligned half-word of each source word. If set, the two consecutive bytes within the unaligned half-word of each source word are exchanged. If the source data width is shorter than a word, this bit is ignored." ] |
1075 | #[inline (always)] |
1076 | pub const fn sbx(&self) -> bool { |
1077 | let val = (self.0 >> 13usize) & 0x01; |
1078 | val != 0 |
1079 | } |
1080 | #[doc = "source byte exchange within the unaligned half-word of each source word. If set, the two consecutive bytes within the unaligned half-word of each source word are exchanged. If the source data width is shorter than a word, this bit is ignored." ] |
1081 | #[inline (always)] |
1082 | pub fn set_sbx(&mut self, val: bool) { |
1083 | self.0 = (self.0 & !(0x01 << 13usize)) | (((val as u32) & 0x01) << 13usize); |
1084 | } |
1085 | #[doc = "source allocated port. This bit is used to allocate the master port for the source transfer. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
1086 | #[inline (always)] |
1087 | pub const fn sap(&self) -> super::vals::Ap { |
1088 | let val = (self.0 >> 14usize) & 0x01; |
1089 | super::vals::Ap::from_bits(val as u8) |
1090 | } |
1091 | #[doc = "source allocated port. This bit is used to allocate the master port for the source transfer. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
1092 | #[inline (always)] |
1093 | pub fn set_sap(&mut self, val: super::vals::Ap) { |
1094 | self.0 = (self.0 & !(0x01 << 14usize)) | (((val.to_bits() as u32) & 0x01) << 14usize); |
1095 | } |
1096 | #[doc = "security attribute of the GPDMA transfer from the source. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx =1 . A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this SSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the GPDMA transfer from the source is non-secure." ] |
1097 | #[inline (always)] |
1098 | pub const fn ssec(&self) -> bool { |
1099 | let val = (self.0 >> 15usize) & 0x01; |
1100 | val != 0 |
1101 | } |
1102 | #[doc = "security attribute of the GPDMA transfer from the source. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx =1 . A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this SSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the GPDMA transfer from the source is non-secure." ] |
1103 | #[inline (always)] |
1104 | pub fn set_ssec(&mut self, val: bool) { |
1105 | self.0 = (self.0 & !(0x01 << 15usize)) | (((val as u32) & 0x01) << 15usize); |
1106 | } |
1107 | #[doc = "binary logarithm of the destination data width of a burst, in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and none transfer is issued. A destination burst transfer must have an aligned address with its data width (start address CH \\[x \\].DAR \\[2:0 \\] |
1108 | and address offset CH \\[x \\].TR3.DAO \\[2:0 \\], versus DDW \\[1:0 \\]). Otherwise a user setting error is reported and no transfer is issued." ] |
1109 | #[inline (always)] |
1110 | pub const fn ddw(&self) -> super::vals::Dw { |
1111 | let val = (self.0 >> 16usize) & 0x03; |
1112 | super::vals::Dw::from_bits(val as u8) |
1113 | } |
1114 | #[doc = "binary logarithm of the destination data width of a burst, in bytes. Note: Setting a 8-byte data width causes a user setting error to be reported and none transfer is issued. A destination burst transfer must have an aligned address with its data width (start address CH \\[x \\].DAR \\[2:0 \\] |
1115 | and address offset CH \\[x \\].TR3.DAO \\[2:0 \\], versus DDW \\[1:0 \\]). Otherwise a user setting error is reported and no transfer is issued." ] |
1116 | #[inline (always)] |
1117 | pub fn set_ddw(&mut self, val: super::vals::Dw) { |
1118 | self.0 = (self.0 & !(0x03 << 16usize)) | (((val.to_bits() as u32) & 0x03) << 16usize); |
1119 | } |
1120 | #[doc = "destination incrementing burst. The destination address, pointed by CH \\[x \\].DAR, is kept constant after a burst beat/single transfer, or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer." ] |
1121 | #[inline (always)] |
1122 | pub const fn dinc(&self) -> bool { |
1123 | let val = (self.0 >> 19usize) & 0x01; |
1124 | val != 0 |
1125 | } |
1126 | #[doc = "destination incrementing burst. The destination address, pointed by CH \\[x \\].DAR, is kept constant after a burst beat/single transfer, or is incremented by the offset value corresponding to a contiguous data after a burst beat/single transfer." ] |
1127 | #[inline (always)] |
1128 | pub fn set_dinc(&mut self, val: bool) { |
1129 | self.0 = (self.0 & !(0x01 << 19usize)) | (((val as u32) & 0x01) << 19usize); |
1130 | } |
1131 | #[doc = "destination burst length minus 1, between 0 and 63. The burst length unit is one data named beat within a burst. If DBL_1 \\[5:0 \\] |
1132 | =0 , the burst can be named as single. Each data/beat has a width defined by the destination data width DDW \\[1:0 \\]. Note: If a burst transfer crossed a 1-Kbyte address boundary on a AHB transfer, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the AHB protocol. If a burst transfer is of length greater than the FIFO size of the channel x, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the FIFO size. Transfer performance is lower, with GPDMA re-arbitration between effective and lower bursts/singles, but the data integrity is guaranteed." ] |
1133 | #[inline (always)] |
1134 | pub const fn dbl_1(&self) -> u8 { |
1135 | let val = (self.0 >> 20usize) & 0x3f; |
1136 | val as u8 |
1137 | } |
1138 | #[doc = "destination burst length minus 1, between 0 and 63. The burst length unit is one data named beat within a burst. If DBL_1 \\[5:0 \\] |
1139 | =0 , the burst can be named as single. Each data/beat has a width defined by the destination data width DDW \\[1:0 \\]. Note: If a burst transfer crossed a 1-Kbyte address boundary on a AHB transfer, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the AHB protocol. If a burst transfer is of length greater than the FIFO size of the channel x, the GPDMA modifies and shortens the programmed burst into singles or bursts of lower length, to be compliant with the FIFO size. Transfer performance is lower, with GPDMA re-arbitration between effective and lower bursts/singles, but the data integrity is guaranteed." ] |
1140 | #[inline (always)] |
1141 | pub fn set_dbl_1(&mut self, val: u8) { |
1142 | self.0 = (self.0 & !(0x3f << 20usize)) | (((val as u32) & 0x3f) << 20usize); |
1143 | } |
1144 | #[doc = "destination byte exchange. IF set, the two consecutive (post PAM) bytes are exchanged in each destination half-word. If the destination data size is a byte, this bit is ignored." ] |
1145 | #[inline (always)] |
1146 | pub const fn dbx(&self) -> bool { |
1147 | let val = (self.0 >> 26usize) & 0x01; |
1148 | val != 0 |
1149 | } |
1150 | #[doc = "destination byte exchange. IF set, the two consecutive (post PAM) bytes are exchanged in each destination half-word. If the destination data size is a byte, this bit is ignored." ] |
1151 | #[inline (always)] |
1152 | pub fn set_dbx(&mut self, val: bool) { |
1153 | self.0 = (self.0 & !(0x01 << 26usize)) | (((val as u32) & 0x01) << 26usize); |
1154 | } |
1155 | #[doc = "destination half-word exchange. If set, e two consecutive (post PAM) half-words are exchanged in each destination word. If the destination data size is shorter than a word, this bit is ignored." ] |
1156 | #[inline (always)] |
1157 | pub const fn dhx(&self) -> bool { |
1158 | let val = (self.0 >> 27usize) & 0x01; |
1159 | val != 0 |
1160 | } |
1161 | #[doc = "destination half-word exchange. If set, e two consecutive (post PAM) half-words are exchanged in each destination word. If the destination data size is shorter than a word, this bit is ignored." ] |
1162 | #[inline (always)] |
1163 | pub fn set_dhx(&mut self, val: bool) { |
1164 | self.0 = (self.0 & !(0x01 << 27usize)) | (((val as u32) & 0x01) << 27usize); |
1165 | } |
1166 | #[doc = "destination allocated port. This bit is used to allocate the master port for the destination transfer. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
1167 | #[inline (always)] |
1168 | pub const fn dap(&self) -> super::vals::Ap { |
1169 | let val = (self.0 >> 30usize) & 0x01; |
1170 | super::vals::Ap::from_bits(val as u8) |
1171 | } |
1172 | #[doc = "destination allocated port. This bit is used to allocate the master port for the destination transfer. Note: This bit must be written when EN = 0. This bit is read-only when EN = 1." ] |
1173 | #[inline (always)] |
1174 | pub fn set_dap(&mut self, val: super::vals::Ap) { |
1175 | self.0 = (self.0 & !(0x01 << 30usize)) | (((val.to_bits() as u32) & 0x01) << 30usize); |
1176 | } |
1177 | #[doc = "security attribute of the GPDMA transfer to the destination. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx = 1. A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this DSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the GPDMA transfer to the destination is non-secure." ] |
1178 | #[inline (always)] |
1179 | pub const fn dsec(&self) -> bool { |
1180 | let val = (self.0 >> 31usize) & 0x01; |
1181 | val != 0 |
1182 | } |
1183 | #[doc = "security attribute of the GPDMA transfer to the destination. If SECCFGR.SECx = 1 and the access is secure: This is a secure register bit. This bit can only be read by a secure software. This bit must be written by a secure software when SECCFGR.SECx = 1. A secure write is ignored when SECCFGR.SECx = 0. When SECCFGR.SECx is de-asserted, this DSEC bit is also de-asserted by hardware (on a secure reconfiguration of the channel as non-secure), and the GPDMA transfer to the destination is non-secure." ] |
1184 | #[inline (always)] |
1185 | pub fn set_dsec(&mut self, val: bool) { |
1186 | self.0 = (self.0 & !(0x01 << 31usize)) | (((val as u32) & 0x01) << 31usize); |
1187 | } |
1188 | } |
1189 | impl Default for ChTr1 { |
1190 | #[inline (always)] |
1191 | fn default() -> ChTr1 { |
1192 | ChTr1(0) |
1193 | } |
1194 | } |
1195 | impl core::fmt::Debug for ChTr1 { |
1196 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
1197 | f.debug_struct("ChTr1" ) |
1198 | .field("sdw" , &self.sdw()) |
1199 | .field("sinc" , &self.sinc()) |
1200 | .field("sbl_1" , &self.sbl_1()) |
1201 | .field("pam" , &self.pam()) |
1202 | .field("sbx" , &self.sbx()) |
1203 | .field("sap" , &self.sap()) |
1204 | .field("ssec" , &self.ssec()) |
1205 | .field("ddw" , &self.ddw()) |
1206 | .field("dinc" , &self.dinc()) |
1207 | .field("dbl_1" , &self.dbl_1()) |
1208 | .field("dbx" , &self.dbx()) |
1209 | .field("dhx" , &self.dhx()) |
1210 | .field("dap" , &self.dap()) |
1211 | .field("dsec" , &self.dsec()) |
1212 | .finish() |
1213 | } |
1214 | } |
#[cfg(feature = "defmt")]
impl defmt::Format for ChTr1 {
    fn format(&self, f: defmt::Formatter) {
        // Mirror the register bitfields into a plain struct so the derived
        // defmt::Format impl renders them by name.
        #[derive(defmt::Format)]
        struct ChTr1 {
            sdw: super::vals::Dw,
            sinc: bool,
            sbl_1: u8,
            pam: super::vals::Pam,
            sbx: bool,
            sap: super::vals::Ap,
            ssec: bool,
            ddw: super::vals::Dw,
            dinc: bool,
            dbl_1: u8,
            dbx: bool,
            dhx: bool,
            dap: super::vals::Ap,
            dsec: bool,
        }
        let view = ChTr1 {
            sdw: self.sdw(),
            sinc: self.sinc(),
            sbl_1: self.sbl_1(),
            pam: self.pam(),
            sbx: self.sbx(),
            sap: self.sap(),
            ssec: self.ssec(),
            ddw: self.ddw(),
            dinc: self.dinc(),
            dbl_1: self.dbl_1(),
            dbx: self.dbx(),
            dhx: self.dhx(),
            dap: self.dap(),
            dsec: self.dsec(),
        };
        defmt::write!(f, "{}", view)
    }
}
#[doc = "GPDMA channel x transfer register 2 (CH[x].TR2). Shared layout for all channels; the generated per-channel numbering is not significant." ]
#[repr (transparent)]
#[derive (Copy, Clone, Eq, PartialEq)]
pub struct ChTr2(pub u32);
1258 | impl ChTr2 { |
1259 | #[doc = "GPDMA hardware request selection. These bits are ignored if channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else, the selected hardware request is internally taken into account as per . The user must not assign a same input hardware request (same REQSEL \\[6:0 \\] |
1260 | value) to different active GPDMA channels (CH \\[x \\].CR.EN = 1 and CH \\[x \\].TR2.SWREQ = 0 for these channels). GPDMA is not intended to hardware support the case of simultaneous enabled channels incorrectly configured with a same hardware peripheral request signal, and there is no user setting error reporting." ] |
1261 | #[inline (always)] |
1262 | pub const fn reqsel(&self) -> u8 { |
1263 | let val = (self.0 >> 0usize) & 0x7f; |
1264 | val as u8 |
1265 | } |
1266 | #[doc = "GPDMA hardware request selection. These bits are ignored if channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else, the selected hardware request is internally taken into account as per . The user must not assign a same input hardware request (same REQSEL \\[6:0 \\] |
1267 | value) to different active GPDMA channels (CH \\[x \\].CR.EN = 1 and CH \\[x \\].TR2.SWREQ = 0 for these channels). GPDMA is not intended to hardware support the case of simultaneous enabled channels incorrectly configured with a same hardware peripheral request signal, and there is no user setting error reporting." ] |
1268 | #[inline (always)] |
1269 | pub fn set_reqsel(&mut self, val: u8) { |
1270 | self.0 = (self.0 & !(0x7f << 0usize)) | (((val as u32) & 0x7f) << 0usize); |
1271 | } |
1272 | #[doc = "software request. This bit is internally taken into account when CH \\[x \\].CR.EN is asserted." ] |
1273 | #[inline (always)] |
1274 | pub const fn swreq(&self) -> super::vals::Swreq { |
1275 | let val = (self.0 >> 9usize) & 0x01; |
1276 | super::vals::Swreq::from_bits(val as u8) |
1277 | } |
1278 | #[doc = "software request. This bit is internally taken into account when CH \\[x \\].CR.EN is asserted." ] |
1279 | #[inline (always)] |
1280 | pub fn set_swreq(&mut self, val: super::vals::Swreq) { |
1281 | self.0 = (self.0 & !(0x01 << 9usize)) | (((val.to_bits() as u32) & 0x01) << 9usize); |
1282 | } |
1283 | #[doc = "destination hardware request. This bit is ignored if channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else: Note:" ] |
1284 | #[inline (always)] |
1285 | pub const fn dreq(&self) -> super::vals::Dreq { |
1286 | let val = (self.0 >> 10usize) & 0x01; |
1287 | super::vals::Dreq::from_bits(val as u8) |
1288 | } |
1289 | #[doc = "destination hardware request. This bit is ignored if channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer). Else: Note:" ] |
1290 | #[inline (always)] |
1291 | pub fn set_dreq(&mut self, val: super::vals::Dreq) { |
1292 | self.0 = (self.0 & !(0x01 << 10usize)) | (((val.to_bits() as u32) & 0x01) << 10usize); |
1293 | } |
1294 | #[doc = "Block hardware request. If the channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer), this bit is ignored. Else:" ] |
1295 | #[inline (always)] |
1296 | pub const fn breq(&self) -> super::vals::Breq { |
1297 | let val = (self.0 >> 11usize) & 0x01; |
1298 | super::vals::Breq::from_bits(val as u8) |
1299 | } |
1300 | #[doc = "Block hardware request. If the channel x is activated (CH \\[x \\].CR.EN asserted) with SWREQ = 1 (software request for a memory-to-memory transfer), this bit is ignored. Else:" ] |
1301 | #[inline (always)] |
1302 | pub fn set_breq(&mut self, val: super::vals::Breq) { |
1303 | self.0 = (self.0 & !(0x01 << 11usize)) | (((val.to_bits() as u32) & 0x01) << 11usize); |
1304 | } |
1305 | #[doc = "trigger mode. These bits define the transfer granularity for its conditioning by the trigger. If the channel x is enabled (CH \\[x \\].CR.EN asserted) with TRIGPOL \\[1:0 \\] |
1306 | = 00 or 11, these TRIGM \\[1:0 \\] |
1307 | bits are ignored. Else, a GPDMA transfer is conditioned by at least one trigger hit: first burst read of a 2D/repeated block transfer is conditioned by one hit trigger. – If the peripheral is programmed as a source (DREQ = 0) of the LLI data transfer, each programmed burst read is conditioned. – If the peripheral is programmed as a destination (DREQ = 1) of the LLI data transfer, each programmed burst write is conditioned. The first memory burst read of a (possibly 2D/repeated) block, also named as the first ready FIFO-based source burst, is gated by the occurrence of both the hardware request and the first trigger hit. The GPDMA monitoring of a trigger for channel x is started when the channel is enabled/loaded with a new active trigger configuration: rising or falling edge on a selected trigger (TRIGPOL \\[1:0 \\] |
1308 | = 01 or respectively TRIGPOL \\[1:0 \\] |
1309 | = 10). The monitoring of this trigger is kept active during the triggered and uncompleted (data or link) transfer; and if a new trigger is detected then, this hit is internally memorized to grant the next transfer, as long as the defined rising or falling edge is not modified, and the TRIGSEL \\[5:0 \\] |
1310 | is not modified, and the channel is enabled. Transferring a next LLIn+1 that updates the CH \\[x \\].TR2 with a new value for any of TRIGSEL \\[5:0 \\] |
1311 | or TRIGPOL \\[1:0 \\], resets the monitoring, trashing the memorized hit of the formerly defined LLIn trigger. After a first new trigger hitn+1 is memorized, if another second trigger hitn+2 is detected and if the hitn triggered transfer is still not completed, hitn+2 is lost and not memorized.memorized. A trigger overrun flag is reported (CH \\[x \\].SR.TOF =1 ), and an interrupt is generated if enabled (CH \\[x \\].CR.TOIE = 1). The channel is not automatically disabled by hardware due to a trigger overrun. Note: When the source block size is not a multiple of the source burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, if TRIGM \\[1:0 \\] |
1312 | = 11 and (SWREQ =1 or (SWREQ = 0 and DREQ =0 )), the shortened burst transfer (by singles or/and by bursts of lower length) is conditioned once by the trigger. When the programmed destination burst is internally shortened by singles or/and by bursts of lower length (versus FIFO size, versus block size, 1-Kbyte boundary address crossing): if the trigger is conditioning the programmed destination burst (if TRIGM \\[1:0 \\] |
1313 | = 11 and SWREQ = 0 and DREQ = 1), this shortened destination burst transfer is conditioned once by the trigger." ] |
1314 | #[inline (always)] |
1315 | pub const fn trigm(&self) -> super::vals::Trigm { |
1316 | let val = (self.0 >> 14usize) & 0x03; |
1317 | super::vals::Trigm::from_bits(val as u8) |
1318 | } |
1319 | #[doc = "trigger mode. These bits define the transfer granularity for its conditioning by the trigger. If the channel x is enabled (CH \\[x \\].CR.EN asserted) with TRIGPOL \\[1:0 \\] |
1320 | = 00 or 11, these TRIGM \\[1:0 \\] |
1321 | bits are ignored. Else, a GPDMA transfer is conditioned by at least one trigger hit: first burst read of a 2D/repeated block transfer is conditioned by one hit trigger. – If the peripheral is programmed as a source (DREQ = 0) of the LLI data transfer, each programmed burst read is conditioned. – If the peripheral is programmed as a destination (DREQ = 1) of the LLI data transfer, each programmed burst write is conditioned. The first memory burst read of a (possibly 2D/repeated) block, also named as the first ready FIFO-based source burst, is gated by the occurrence of both the hardware request and the first trigger hit. The GPDMA monitoring of a trigger for channel x is started when the channel is enabled/loaded with a new active trigger configuration: rising or falling edge on a selected trigger (TRIGPOL \\[1:0 \\] |
1322 | = 01 or respectively TRIGPOL \\[1:0 \\] |
1323 | = 10). The monitoring of this trigger is kept active during the triggered and uncompleted (data or link) transfer; and if a new trigger is detected then, this hit is internally memorized to grant the next transfer, as long as the defined rising or falling edge is not modified, and the TRIGSEL \\[5:0 \\] |
1324 | is not modified, and the channel is enabled. Transferring a next LLIn+1 that updates the CH \\[x \\].TR2 with a new value for any of TRIGSEL \\[5:0 \\] |
1325 | or TRIGPOL \\[1:0 \\], resets the monitoring, trashing the memorized hit of the formerly defined LLIn trigger. After a first new trigger hitn+1 is memorized, if another second trigger hitn+2 is detected and if the hitn triggered transfer is still not completed, hitn+2 is lost and not memorized.memorized. A trigger overrun flag is reported (CH \\[x \\].SR.TOF =1 ), and an interrupt is generated if enabled (CH \\[x \\].CR.TOIE = 1). The channel is not automatically disabled by hardware due to a trigger overrun. Note: When the source block size is not a multiple of the source burst size and is a multiple of the source data width, then the last programmed source burst is not completed and is internally shorten to match the block size. In this case, if TRIGM \\[1:0 \\] |
1326 | = 11 and (SWREQ =1 or (SWREQ = 0 and DREQ =0 )), the shortened burst transfer (by singles or/and by bursts of lower length) is conditioned once by the trigger. When the programmed destination burst is internally shortened by singles or/and by bursts of lower length (versus FIFO size, versus block size, 1-Kbyte boundary address crossing): if the trigger is conditioning the programmed destination burst (if TRIGM \\[1:0 \\] |
1327 | = 11 and SWREQ = 0 and DREQ = 1), this shortened destination burst transfer is conditioned once by the trigger." ] |
1328 | #[inline (always)] |
1329 | pub fn set_trigm(&mut self, val: super::vals::Trigm) { |
1330 | self.0 = (self.0 & !(0x03 << 14usize)) | (((val.to_bits() as u32) & 0x03) << 14usize); |
1331 | } |
1332 | #[doc = "trigger event input selection. These bits select the trigger event input of the GPDMA transfer (as per ), with an active trigger event if TRIGPOL \\[1:0 \\] |
1333 | ≠ 00." ] |
1334 | #[inline (always)] |
1335 | pub const fn trigsel(&self) -> u8 { |
1336 | let val = (self.0 >> 16usize) & 0x3f; |
1337 | val as u8 |
1338 | } |
1339 | #[doc = "trigger event input selection. These bits select the trigger event input of the GPDMA transfer (as per ), with an active trigger event if TRIGPOL \\[1:0 \\] |
1340 | ≠ 00." ] |
1341 | #[inline (always)] |
1342 | pub fn set_trigsel(&mut self, val: u8) { |
1343 | self.0 = (self.0 & !(0x3f << 16usize)) | (((val as u32) & 0x3f) << 16usize); |
1344 | } |
1345 | #[doc = "trigger event polarity. These bits define the polarity of the selected trigger event input defined by TRIGSEL \\[5:0 \\]." ] |
1346 | #[inline (always)] |
1347 | pub const fn trigpol(&self) -> super::vals::Trigpol { |
1348 | let val = (self.0 >> 24usize) & 0x03; |
1349 | super::vals::Trigpol::from_bits(val as u8) |
1350 | } |
1351 | #[doc = "trigger event polarity. These bits define the polarity of the selected trigger event input defined by TRIGSEL \\[5:0 \\]." ] |
1352 | #[inline (always)] |
1353 | pub fn set_trigpol(&mut self, val: super::vals::Trigpol) { |
1354 | self.0 = (self.0 & !(0x03 << 24usize)) | (((val.to_bits() as u32) & 0x03) << 24usize); |
1355 | } |
1356 | #[doc = "transfer complete event mode. These bits define the transfer granularity for the transfer complete and half transfer complete events generation. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1357 | = 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1358 | = 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (i.e. directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1359 | =0 ), then the half transfer event is not generated, and the transfer complete event is generated when is completed the loading of the LLI1." ] |
1360 | #[inline (always)] |
1361 | pub const fn tcem(&self) -> super::vals::Tcem { |
1362 | let val = (self.0 >> 30usize) & 0x03; |
1363 | super::vals::Tcem::from_bits(val as u8) |
1364 | } |
1365 | #[doc = "transfer complete event mode. These bits define the transfer granularity for the transfer complete and half transfer complete events generation. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1366 | = 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1367 | = 0), then neither the complete transfer event nor the half transfer event is generated. Note: If the initial LLI0 data transfer is null/void (i.e. directly programmed by the internal register file with CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1368 | =0 ), then the half transfer event is not generated, and the transfer complete event is generated when is completed the loading of the LLI1." ] |
1369 | #[inline (always)] |
1370 | pub fn set_tcem(&mut self, val: super::vals::Tcem) { |
1371 | self.0 = (self.0 & !(0x03 << 30usize)) | (((val.to_bits() as u32) & 0x03) << 30usize); |
1372 | } |
1373 | } |
1374 | impl Default for ChTr2 { |
1375 | #[inline (always)] |
1376 | fn default() -> ChTr2 { |
1377 | ChTr2(0) |
1378 | } |
1379 | } |
1380 | impl core::fmt::Debug for ChTr2 { |
1381 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
1382 | f.debug_struct("ChTr2" ) |
1383 | .field("reqsel" , &self.reqsel()) |
1384 | .field("swreq" , &self.swreq()) |
1385 | .field("dreq" , &self.dreq()) |
1386 | .field("breq" , &self.breq()) |
1387 | .field("trigm" , &self.trigm()) |
1388 | .field("trigsel" , &self.trigsel()) |
1389 | .field("trigpol" , &self.trigpol()) |
1390 | .field("tcem" , &self.tcem()) |
1391 | .finish() |
1392 | } |
1393 | } |
#[cfg(feature = "defmt")]
impl defmt::Format for ChTr2 {
    fn format(&self, f: defmt::Formatter) {
        // Mirror the register bitfields into a plain struct so the derived
        // defmt::Format impl renders them by name.
        #[derive(defmt::Format)]
        struct ChTr2 {
            reqsel: u8,
            swreq: super::vals::Swreq,
            dreq: super::vals::Dreq,
            breq: super::vals::Breq,
            trigm: super::vals::Trigm,
            trigsel: u8,
            trigpol: super::vals::Trigpol,
            tcem: super::vals::Tcem,
        }
        let view = ChTr2 {
            reqsel: self.reqsel(),
            swreq: self.swreq(),
            dreq: self.dreq(),
            breq: self.breq(),
            trigm: self.trigm(),
            trigsel: self.trigsel(),
            trigpol: self.trigpol(),
            tcem: self.tcem(),
        };
        defmt::write!(f, "{}", view)
    }
}
/// GPDMA channel x transfer register 3 (CH[x].TR3): source and destination
/// address-offset increments. Shared layout for all channels.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct ChTr3(pub u32);
impl ChTr3 {
    /// SAO[12:0] (bits 12:0): source address offset increment, applied to
    /// CH[x].SAR after each programmed source burst (increment or decrement
    /// per CH[x].BR1.SDEC). Must be aligned with the programmed source data
    /// width, else a user setting error is reported and no transfer is
    /// issued; not applied to an internally shortened last source burst.
    #[inline(always)]
    pub const fn sao(&self) -> u16 {
        (self.0 & 0x1fff) as u16
    }
    /// Set SAO[12:0]: source address offset increment.
    #[inline(always)]
    pub fn set_sao(&mut self, val: u16) {
        self.0 = (self.0 & !0x1fff) | ((val as u32) & 0x1fff);
    }
    /// DAO[12:0] (bits 28:16): destination address offset increment, applied
    /// to CH[x].DAR after each programmed destination burst (increment or
    /// decrement per CH[x].BR1.DDEC). Must be aligned with the programmed
    /// destination data width, else a user setting error is reported and no
    /// transfer is issued.
    #[inline(always)]
    pub const fn dao(&self) -> u16 {
        ((self.0 >> 16usize) & 0x1fff) as u16
    }
    /// Set DAO[12:0]: destination address offset increment.
    #[inline(always)]
    pub fn set_dao(&mut self, val: u16) {
        self.0 = (self.0 & !(0x1fff << 16usize)) | (((val as u32) & 0x1fff) << 16usize);
    }
}
impl Default for ChTr3 {
    #[inline(always)]
    fn default() -> ChTr3 {
        // Register reset value: all fields zero.
        Self(0)
    }
}
impl core::fmt::Debug for ChTr3 {
    /// Render both offset fields by name via their typed accessors.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut s = f.debug_struct("ChTr3");
        s.field("sao", &self.sao());
        s.field("dao", &self.dao());
        s.finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for ChTr3 {
    fn format(&self, f: defmt::Formatter) {
        // Mirror the register bitfields into a plain struct so the derived
        // defmt::Format impl renders them by name.
        #[derive(defmt::Format)]
        struct ChTr3 {
            sao: u16,
            dao: u16,
        }
        let view = ChTr3 {
            sao: self.sao(),
            dao: self.dao(),
        };
        defmt::write!(f, "{}", view)
    }
}
/// GPDMA secure masked interrupt status register: one MIS bit per channel.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Misr(pub u32);
impl Misr {
    /// Masked interrupt status of channel `n` (MISx).
    ///
    /// # Panics
    /// Panics if `n >= 16`.
    #[inline(always)]
    pub const fn mis(&self, n: usize) -> bool {
        assert!(n < 16usize);
        (self.0 >> n) & 0x01 != 0
    }
    /// Set the masked interrupt status bit of channel `n`.
    ///
    /// # Panics
    /// Panics if `n >= 16`.
    #[inline(always)]
    pub fn set_mis(&mut self, n: usize, val: bool) {
        assert!(n < 16usize);
        let mask = 0x01u32 << n;
        self.0 = (self.0 & !mask) | (((val as u32) & 0x01) << n);
    }
}
impl Default for Misr {
    #[inline(always)]
    fn default() -> Misr {
        // Register reset value: all flags clear.
        Self(0)
    }
}
impl core::fmt::Debug for Misr {
    /// Render the 16 per-channel flags as one boolean array.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut bits = [false; 16];
        let mut n = 0;
        while n < 16 {
            bits[n] = self.mis(n);
            n += 1;
        }
        f.debug_struct("Misr").field("mis", &bits).finish()
    }
}
#[cfg(feature = "defmt")]
impl defmt::Format for Misr {
    fn format(&self, f: defmt::Formatter) {
        // Mirror the flags into a plain struct so the derived defmt::Format
        // impl renders them by name.
        #[derive(defmt::Format)]
        struct Misr {
            mis: [bool; 16usize],
        }
        let mut mis = [false; 16];
        let mut n = 0;
        while n < 16 {
            mis[n] = self.mis(n);
            n += 1;
        }
        defmt::write!(f, "{}", Misr { mis })
    }
}
/// GPDMA privileged configuration register: one PRIV bit per channel.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Privcfgr(pub u32);
impl Privcfgr {
    /// Privileged attribute of channel `n` (PRIVx).
    ///
    /// # Panics
    /// Panics if `n >= 16`.
    #[inline(always)]
    pub const fn priv_(&self, n: usize) -> bool {
        assert!(n < 16usize);
        (self.0 >> n) & 0x01 != 0
    }
    /// Set the privileged attribute of channel `n`.
    ///
    /// # Panics
    /// Panics if `n >= 16`.
    #[inline(always)]
    pub fn set_priv_(&mut self, n: usize, val: bool) {
        assert!(n < 16usize);
        let mask = 0x01u32 << n;
        self.0 = (self.0 & !mask) | (((val as u32) & 0x01) << n);
    }
}
impl Default for Privcfgr {
    #[inline(always)]
    fn default() -> Privcfgr {
        // Register reset value: all channels non-privileged.
        Self(0)
    }
}
impl core::fmt::Debug for Privcfgr {
    /// Render the 16 per-channel flags as one boolean array.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let mut bits = [false; 16];
        let mut n = 0;
        while n < 16 {
            bits[n] = self.priv_(n);
            n += 1;
        }
        f.debug_struct("Privcfgr").field("priv_", &bits).finish()
    }
}
1626 | #[cfg (feature = "defmt" )] |
1627 | impl defmt::Format for Privcfgr { |
1628 | fn format(&self, f: defmt::Formatter) { |
1629 | #[derive (defmt :: Format)] |
1630 | struct Privcfgr { |
1631 | priv_: [bool; 16usize], |
1632 | } |
1633 | let proxy = Privcfgr { |
1634 | priv_: [ |
1635 | self.priv_(0usize), |
1636 | self.priv_(1usize), |
1637 | self.priv_(2usize), |
1638 | self.priv_(3usize), |
1639 | self.priv_(4usize), |
1640 | self.priv_(5usize), |
1641 | self.priv_(6usize), |
1642 | self.priv_(7usize), |
1643 | self.priv_(8usize), |
1644 | self.priv_(9usize), |
1645 | self.priv_(10usize), |
1646 | self.priv_(11usize), |
1647 | self.priv_(12usize), |
1648 | self.priv_(13usize), |
1649 | self.priv_(14usize), |
1650 | self.priv_(15usize), |
1651 | ], |
1652 | }; |
1653 | defmt::write!(f, "{}" , proxy) |
1654 | } |
1655 | } |
1656 | #[doc = "GPDMA configuration lock register" ] |
1657 | #[repr (transparent)] |
1658 | #[derive (Copy, Clone, Eq, PartialEq)] |
1659 | pub struct Rcfglockr(pub u32); |
1660 | impl Rcfglockr { |
1661 | #[doc = "LOCK0" ] |
1662 | #[inline (always)] |
1663 | pub const fn lock(&self, n: usize) -> bool { |
1664 | assert!(n < 16usize); |
1665 | let offs = 0usize + n * 1usize; |
1666 | let val = (self.0 >> offs) & 0x01; |
1667 | val != 0 |
1668 | } |
1669 | #[doc = "LOCK0" ] |
1670 | #[inline (always)] |
1671 | pub fn set_lock(&mut self, n: usize, val: bool) { |
1672 | assert!(n < 16usize); |
1673 | let offs = 0usize + n * 1usize; |
1674 | self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs); |
1675 | } |
1676 | } |
1677 | impl Default for Rcfglockr { |
1678 | #[inline (always)] |
1679 | fn default() -> Rcfglockr { |
1680 | Rcfglockr(0) |
1681 | } |
1682 | } |
1683 | impl core::fmt::Debug for Rcfglockr { |
1684 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
1685 | f.debug_struct("Rcfglockr" ) |
1686 | .field( |
1687 | "lock" , |
1688 | &[ |
1689 | self.lock(0usize), |
1690 | self.lock(1usize), |
1691 | self.lock(2usize), |
1692 | self.lock(3usize), |
1693 | self.lock(4usize), |
1694 | self.lock(5usize), |
1695 | self.lock(6usize), |
1696 | self.lock(7usize), |
1697 | self.lock(8usize), |
1698 | self.lock(9usize), |
1699 | self.lock(10usize), |
1700 | self.lock(11usize), |
1701 | self.lock(12usize), |
1702 | self.lock(13usize), |
1703 | self.lock(14usize), |
1704 | self.lock(15usize), |
1705 | ], |
1706 | ) |
1707 | .finish() |
1708 | } |
1709 | } |
1710 | #[cfg (feature = "defmt" )] |
1711 | impl defmt::Format for Rcfglockr { |
1712 | fn format(&self, f: defmt::Formatter) { |
1713 | #[derive (defmt :: Format)] |
1714 | struct Rcfglockr { |
1715 | lock: [bool; 16usize], |
1716 | } |
1717 | let proxy = Rcfglockr { |
1718 | lock: [ |
1719 | self.lock(0usize), |
1720 | self.lock(1usize), |
1721 | self.lock(2usize), |
1722 | self.lock(3usize), |
1723 | self.lock(4usize), |
1724 | self.lock(5usize), |
1725 | self.lock(6usize), |
1726 | self.lock(7usize), |
1727 | self.lock(8usize), |
1728 | self.lock(9usize), |
1729 | self.lock(10usize), |
1730 | self.lock(11usize), |
1731 | self.lock(12usize), |
1732 | self.lock(13usize), |
1733 | self.lock(14usize), |
1734 | self.lock(15usize), |
1735 | ], |
1736 | }; |
1737 | defmt::write!(f, "{}" , proxy) |
1738 | } |
1739 | } |
1740 | #[doc = "GPDMA secure configuration register" ] |
1741 | #[repr (transparent)] |
1742 | #[derive (Copy, Clone, Eq, PartialEq)] |
1743 | pub struct Seccfgr(pub u32); |
1744 | impl Seccfgr { |
1745 | #[doc = "SEC0" ] |
1746 | #[inline (always)] |
1747 | pub const fn sec(&self, n: usize) -> bool { |
1748 | assert!(n < 16usize); |
1749 | let offs = 0usize + n * 1usize; |
1750 | let val = (self.0 >> offs) & 0x01; |
1751 | val != 0 |
1752 | } |
1753 | #[doc = "SEC0" ] |
1754 | #[inline (always)] |
1755 | pub fn set_sec(&mut self, n: usize, val: bool) { |
1756 | assert!(n < 16usize); |
1757 | let offs = 0usize + n * 1usize; |
1758 | self.0 = (self.0 & !(0x01 << offs)) | (((val as u32) & 0x01) << offs); |
1759 | } |
1760 | } |
1761 | impl Default for Seccfgr { |
1762 | #[inline (always)] |
1763 | fn default() -> Seccfgr { |
1764 | Seccfgr(0) |
1765 | } |
1766 | } |
1767 | impl core::fmt::Debug for Seccfgr { |
1768 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
1769 | f.debug_struct("Seccfgr" ) |
1770 | .field( |
1771 | "sec" , |
1772 | &[ |
1773 | self.sec(0usize), |
1774 | self.sec(1usize), |
1775 | self.sec(2usize), |
1776 | self.sec(3usize), |
1777 | self.sec(4usize), |
1778 | self.sec(5usize), |
1779 | self.sec(6usize), |
1780 | self.sec(7usize), |
1781 | self.sec(8usize), |
1782 | self.sec(9usize), |
1783 | self.sec(10usize), |
1784 | self.sec(11usize), |
1785 | self.sec(12usize), |
1786 | self.sec(13usize), |
1787 | self.sec(14usize), |
1788 | self.sec(15usize), |
1789 | ], |
1790 | ) |
1791 | .finish() |
1792 | } |
1793 | } |
1794 | #[cfg (feature = "defmt" )] |
1795 | impl defmt::Format for Seccfgr { |
1796 | fn format(&self, f: defmt::Formatter) { |
1797 | #[derive (defmt :: Format)] |
1798 | struct Seccfgr { |
1799 | sec: [bool; 16usize], |
1800 | } |
1801 | let proxy = Seccfgr { |
1802 | sec: [ |
1803 | self.sec(0usize), |
1804 | self.sec(1usize), |
1805 | self.sec(2usize), |
1806 | self.sec(3usize), |
1807 | self.sec(4usize), |
1808 | self.sec(5usize), |
1809 | self.sec(6usize), |
1810 | self.sec(7usize), |
1811 | self.sec(8usize), |
1812 | self.sec(9usize), |
1813 | self.sec(10usize), |
1814 | self.sec(11usize), |
1815 | self.sec(12usize), |
1816 | self.sec(13usize), |
1817 | self.sec(14usize), |
1818 | self.sec(15usize), |
1819 | ], |
1820 | }; |
1821 | defmt::write!(f, "{}" , proxy) |
1822 | } |
1823 | } |
1824 | } |
1825 | pub mod vals { |
1826 | #[repr (u8)] |
1827 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1828 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1829 | pub enum Ap { |
1830 | #[doc = "port 0 (AHB) allocated" ] |
1831 | PORT0 = 0x0, |
1832 | #[doc = "port 1 (AHB) allocated" ] |
1833 | PORT1 = 0x01, |
1834 | } |
1835 | impl Ap { |
1836 | #[inline (always)] |
1837 | pub const fn from_bits(val: u8) -> Ap { |
1838 | unsafe { core::mem::transmute(val & 0x01) } |
1839 | } |
1840 | #[inline (always)] |
1841 | pub const fn to_bits(self) -> u8 { |
1842 | unsafe { core::mem::transmute(self) } |
1843 | } |
1844 | } |
1845 | impl From<u8> for Ap { |
1846 | #[inline (always)] |
1847 | fn from(val: u8) -> Ap { |
1848 | Ap::from_bits(val) |
1849 | } |
1850 | } |
1851 | impl From<Ap> for u8 { |
1852 | #[inline (always)] |
1853 | fn from(val: Ap) -> u8 { |
1854 | Ap::to_bits(val) |
1855 | } |
1856 | } |
1857 | #[repr (u8)] |
1858 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1859 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1860 | pub enum Breq { |
1861 | #[doc = "the selected hardware request is driven by a peripheral with a hardware request/acknowledge protocol at a burst level." ] |
1862 | BURST = 0x0, |
1863 | #[doc = "the selected hardware request is driven by a peripheral with a hardware request/acknowledge protocol at a block level (see )." ] |
1864 | BLOCK = 0x01, |
1865 | } |
1866 | impl Breq { |
1867 | #[inline (always)] |
1868 | pub const fn from_bits(val: u8) -> Breq { |
1869 | unsafe { core::mem::transmute(val & 0x01) } |
1870 | } |
1871 | #[inline (always)] |
1872 | pub const fn to_bits(self) -> u8 { |
1873 | unsafe { core::mem::transmute(self) } |
1874 | } |
1875 | } |
1876 | impl From<u8> for Breq { |
1877 | #[inline (always)] |
1878 | fn from(val: u8) -> Breq { |
1879 | Breq::from_bits(val) |
1880 | } |
1881 | } |
1882 | impl From<Breq> for u8 { |
1883 | #[inline (always)] |
1884 | fn from(val: Breq) -> u8 { |
1885 | Breq::to_bits(val) |
1886 | } |
1887 | } |
1888 | #[repr (u8)] |
1889 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1890 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1891 | pub enum Dec { |
1892 | #[doc = "The address is incremented by the programmed offset." ] |
1893 | ADD = 0x0, |
1894 | #[doc = "The address is decremented by the programmed offset." ] |
1895 | SUBTRACT = 0x01, |
1896 | } |
1897 | impl Dec { |
1898 | #[inline (always)] |
1899 | pub const fn from_bits(val: u8) -> Dec { |
1900 | unsafe { core::mem::transmute(val & 0x01) } |
1901 | } |
1902 | #[inline (always)] |
1903 | pub const fn to_bits(self) -> u8 { |
1904 | unsafe { core::mem::transmute(self) } |
1905 | } |
1906 | } |
1907 | impl From<u8> for Dec { |
1908 | #[inline (always)] |
1909 | fn from(val: u8) -> Dec { |
1910 | Dec::from_bits(val) |
1911 | } |
1912 | } |
1913 | impl From<Dec> for u8 { |
1914 | #[inline (always)] |
1915 | fn from(val: Dec) -> u8 { |
1916 | Dec::to_bits(val) |
1917 | } |
1918 | } |
1919 | #[repr (u8)] |
1920 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1921 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1922 | pub enum Dreq { |
1923 | #[doc = "selected hardware request driven by a source peripheral (request signal taken into account by the GPDMA transfer scheduler over the source/read port)" ] |
1924 | SOURCE_PERIPHERAL = 0x0, |
1925 | #[doc = "selected hardware request driven by a destination peripheral (request signal taken into account by the GPDMA transfer scheduler over the destination/write port)" ] |
1926 | DESTINATION_PERIPHERAL = 0x01, |
1927 | } |
1928 | impl Dreq { |
1929 | #[inline (always)] |
1930 | pub const fn from_bits(val: u8) -> Dreq { |
1931 | unsafe { core::mem::transmute(val & 0x01) } |
1932 | } |
1933 | #[inline (always)] |
1934 | pub const fn to_bits(self) -> u8 { |
1935 | unsafe { core::mem::transmute(self) } |
1936 | } |
1937 | } |
1938 | impl From<u8> for Dreq { |
1939 | #[inline (always)] |
1940 | fn from(val: u8) -> Dreq { |
1941 | Dreq::from_bits(val) |
1942 | } |
1943 | } |
1944 | impl From<Dreq> for u8 { |
1945 | #[inline (always)] |
1946 | fn from(val: Dreq) -> u8 { |
1947 | Dreq::to_bits(val) |
1948 | } |
1949 | } |
1950 | #[repr (u8)] |
1951 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1952 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1953 | pub enum Dw { |
1954 | #[doc = "byte" ] |
1955 | BYTE = 0x0, |
1956 | #[doc = "half-word (2 bytes)" ] |
1957 | HALF_WORD = 0x01, |
1958 | #[doc = "word (4 bytes)" ] |
1959 | WORD = 0x02, |
1960 | _RESERVED_3 = 0x03, |
1961 | } |
1962 | impl Dw { |
1963 | #[inline (always)] |
1964 | pub const fn from_bits(val: u8) -> Dw { |
1965 | unsafe { core::mem::transmute(val & 0x03) } |
1966 | } |
1967 | #[inline (always)] |
1968 | pub const fn to_bits(self) -> u8 { |
1969 | unsafe { core::mem::transmute(self) } |
1970 | } |
1971 | } |
1972 | impl From<u8> for Dw { |
1973 | #[inline (always)] |
1974 | fn from(val: u8) -> Dw { |
1975 | Dw::from_bits(val) |
1976 | } |
1977 | } |
1978 | impl From<Dw> for u8 { |
1979 | #[inline (always)] |
1980 | fn from(val: Dw) -> u8 { |
1981 | Dw::to_bits(val) |
1982 | } |
1983 | } |
1984 | #[repr (u8)] |
1985 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
1986 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
1987 | pub enum Lsm { |
1988 | #[doc = "channel executed for the full linked-list and completed at the end of the last LLI (CH \\[x \\].LLR = 0). The 16 low-significant bits of the link address are null (LA \\[15:0 \\] |
1989 | = 0) and all the update bits are null (UT1 =UB1 = UT2 = USA = UDA = ULL = 0 and UT3 = UB2 = 0 if present). Then CH \\[x \\].BR1.BNDT \\[15:0 \\] |
1990 | = 0 and CH \\[x \\].BR1.BRC \\[10:0 \\] |
1991 | = 0 if present." ] |
1992 | RUN_TO_COMPLETION = 0x0, |
1993 | #[doc = "channel executed once for the current LLI" ] |
1994 | LINK_STEP = 0x01, |
1995 | } |
1996 | impl Lsm { |
1997 | #[inline (always)] |
1998 | pub const fn from_bits(val: u8) -> Lsm { |
1999 | unsafe { core::mem::transmute(val & 0x01) } |
2000 | } |
2001 | #[inline (always)] |
2002 | pub const fn to_bits(self) -> u8 { |
2003 | unsafe { core::mem::transmute(self) } |
2004 | } |
2005 | } |
2006 | impl From<u8> for Lsm { |
2007 | #[inline (always)] |
2008 | fn from(val: u8) -> Lsm { |
2009 | Lsm::from_bits(val) |
2010 | } |
2011 | } |
2012 | impl From<Lsm> for u8 { |
2013 | #[inline (always)] |
2014 | fn from(val: Lsm) -> u8 { |
2015 | Lsm::to_bits(val) |
2016 | } |
2017 | } |
2018 | #[repr (u8)] |
2019 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2020 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2021 | pub enum Pam { |
2022 | #[doc = "If destination is wider: source data is transferred as right aligned, padded with 0s up to the destination data width If source is wider: source data is transferred as right aligned, left-truncated down to the destination data width" ] |
2023 | ZERO_EXTEND_OR_LEFT_TRUNCATE = 0x0, |
2024 | #[doc = "If destination is wider: source data is transferred as right aligned, sign extended up to the destination data width If source is wider: source data is transferred as left-aligned, right-truncated down to the destination data width" ] |
2025 | SIGN_EXTEND_OR_RIGHT_TRUNCATE = 0x01, |
2026 | #[doc = "source data is FIFO queued and packed/unpacked at the destination data width, to be transferred in a left (LSB) to right (MSB) order (named little endian) to the destination" ] |
2027 | PACK = 0x02, |
2028 | _RESERVED_3 = 0x03, |
2029 | } |
2030 | impl Pam { |
2031 | #[inline (always)] |
2032 | pub const fn from_bits(val: u8) -> Pam { |
2033 | unsafe { core::mem::transmute(val & 0x03) } |
2034 | } |
2035 | #[inline (always)] |
2036 | pub const fn to_bits(self) -> u8 { |
2037 | unsafe { core::mem::transmute(self) } |
2038 | } |
2039 | } |
2040 | impl From<u8> for Pam { |
2041 | #[inline (always)] |
2042 | fn from(val: u8) -> Pam { |
2043 | Pam::from_bits(val) |
2044 | } |
2045 | } |
2046 | impl From<Pam> for u8 { |
2047 | #[inline (always)] |
2048 | fn from(val: Pam) -> u8 { |
2049 | Pam::to_bits(val) |
2050 | } |
2051 | } |
2052 | #[repr (u8)] |
2053 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2054 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2055 | pub enum Prio { |
2056 | #[doc = "low priority, low weight" ] |
2057 | LOW_WITH_LOWH_WEIGHT = 0x0, |
2058 | #[doc = "low priority, mid weight" ] |
2059 | LOW_WITH_MID_WEIGHT = 0x01, |
2060 | #[doc = "low priority, high weight" ] |
2061 | LOW_WITH_HIGH_WEIGHT = 0x02, |
2062 | #[doc = "high priority" ] |
2063 | HIGH = 0x03, |
2064 | } |
2065 | impl Prio { |
2066 | #[inline (always)] |
2067 | pub const fn from_bits(val: u8) -> Prio { |
2068 | unsafe { core::mem::transmute(val & 0x03) } |
2069 | } |
2070 | #[inline (always)] |
2071 | pub const fn to_bits(self) -> u8 { |
2072 | unsafe { core::mem::transmute(self) } |
2073 | } |
2074 | } |
2075 | impl From<u8> for Prio { |
2076 | #[inline (always)] |
2077 | fn from(val: u8) -> Prio { |
2078 | Prio::from_bits(val) |
2079 | } |
2080 | } |
2081 | impl From<Prio> for u8 { |
2082 | #[inline (always)] |
2083 | fn from(val: Prio) -> u8 { |
2084 | Prio::to_bits(val) |
2085 | } |
2086 | } |
2087 | #[repr (u8)] |
2088 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2089 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2090 | pub enum Swreq { |
2091 | #[doc = "no software request. The selected hardware request REQSEL \\[6:0 \\] |
2092 | is taken into account." ] |
2093 | HARDWARE = 0x0, |
2094 | #[doc = "software request for a memory-to-memory transfer. The default selected hardware request as per REQSEL \\[6:0 \\] |
2095 | is ignored." ] |
2096 | SOFTWARE = 0x01, |
2097 | } |
2098 | impl Swreq { |
2099 | #[inline (always)] |
2100 | pub const fn from_bits(val: u8) -> Swreq { |
2101 | unsafe { core::mem::transmute(val & 0x01) } |
2102 | } |
2103 | #[inline (always)] |
2104 | pub const fn to_bits(self) -> u8 { |
2105 | unsafe { core::mem::transmute(self) } |
2106 | } |
2107 | } |
2108 | impl From<u8> for Swreq { |
2109 | #[inline (always)] |
2110 | fn from(val: u8) -> Swreq { |
2111 | Swreq::from_bits(val) |
2112 | } |
2113 | } |
2114 | impl From<Swreq> for u8 { |
2115 | #[inline (always)] |
2116 | fn from(val: Swreq) -> u8 { |
2117 | Swreq::to_bits(val) |
2118 | } |
2119 | } |
2120 | #[repr (u8)] |
2121 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2122 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2123 | pub enum Tcem { |
2124 | #[doc = "at block level (when CH \\[x \\].BR1.BNDT \\[15:0 \\] |
2125 | = 0): the complete (and the half) transfer event is generated at the (respectively half of the) end of a block." ] |
2126 | EACH_BLOCK = 0x0, |
2127 | #[doc = "channel x = 0 to 11, same as 00; channel x=12 to 15, at 2D/repeated block level (when CH \\[x \\].BR1.BRC \\[10:0 \\] |
2128 | = 0 and CH \\[x \\].BR1.BNDT \\[15:0 \\] |
2129 | = 0), the complete (and the half) transfer event is generated at the end (respectively half of the end) of the 2D/repeated block." ] |
2130 | EACH2DBLOCK = 0x01, |
2131 | #[doc = "at LLI level: the complete transfer event is generated at the end of the LLI transfer, including the update of the LLI if any. The half transfer event is generated at the half of the LLI data transfer (the LLI data transfer being a block transfer or a 2D/repeated block transfer for channel x = 12 to 15), if any data transfer." ] |
2132 | EACH_LINKED_LIST_ITEM = 0x02, |
2133 | #[doc = "at channel level: the complete transfer event is generated at the end of the last LLI transfer. The half transfer event is generated at the half of the data transfer of the last LLI. The last LLI updates the link address CH \\[x \\].LLR.LA \\[15:2 \\] |
2134 | to zero and clears all the CH \\[x \\].LLR update bits (UT1, UT2, UB1, USA, UDA and ULL, plus UT3 and UB2 if present). If the channel transfer is continuous/infinite, no event is generated." ] |
2135 | LAST_LINKED_LIST_ITEM = 0x03, |
2136 | } |
2137 | impl Tcem { |
2138 | #[inline (always)] |
2139 | pub const fn from_bits(val: u8) -> Tcem { |
2140 | unsafe { core::mem::transmute(val & 0x03) } |
2141 | } |
2142 | #[inline (always)] |
2143 | pub const fn to_bits(self) -> u8 { |
2144 | unsafe { core::mem::transmute(self) } |
2145 | } |
2146 | } |
2147 | impl From<u8> for Tcem { |
2148 | #[inline (always)] |
2149 | fn from(val: u8) -> Tcem { |
2150 | Tcem::from_bits(val) |
2151 | } |
2152 | } |
2153 | impl From<Tcem> for u8 { |
2154 | #[inline (always)] |
2155 | fn from(val: Tcem) -> u8 { |
2156 | Tcem::to_bits(val) |
2157 | } |
2158 | } |
2159 | #[repr (u8)] |
2160 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2161 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2162 | pub enum Trigm { |
2163 | #[doc = "at block level: the first burst read of each block transfer is conditioned by one hit trigger (channel x = 12 to 15, for each block if a 2D/repeated block is configured with CH \\[x \\].BR1.BRC \\[10:0 \\] |
2164 | ≠ 0)." ] |
2165 | BLOCK = 0x0, |
2166 | #[doc = "channel x = 0 to 11, same as 00; channel x=12 to 15, at 2D/repeated block level, the" ] |
2167 | _2DBLOCK = 0x01, |
2168 | #[doc = "at link level: a LLI link transfer is conditioned by one hit trigger. The LLI data transfer (if any) is not conditioned." ] |
2169 | LINKED_LIST_ITEM = 0x02, |
2170 | #[doc = "at programmed burst level: If SWREQ = 1, each programmed burst read is conditioned by one hit trigger. If SWREQ = 0, each programmed burst that is requested by the selected peripheral, is conditioned by one hit trigger." ] |
2171 | BURST = 0x03, |
2172 | } |
2173 | impl Trigm { |
2174 | #[inline (always)] |
2175 | pub const fn from_bits(val: u8) -> Trigm { |
2176 | unsafe { core::mem::transmute(val & 0x03) } |
2177 | } |
2178 | #[inline (always)] |
2179 | pub const fn to_bits(self) -> u8 { |
2180 | unsafe { core::mem::transmute(self) } |
2181 | } |
2182 | } |
2183 | impl From<u8> for Trigm { |
2184 | #[inline (always)] |
2185 | fn from(val: u8) -> Trigm { |
2186 | Trigm::from_bits(val) |
2187 | } |
2188 | } |
2189 | impl From<Trigm> for u8 { |
2190 | #[inline (always)] |
2191 | fn from(val: Trigm) -> u8 { |
2192 | Trigm::to_bits(val) |
2193 | } |
2194 | } |
2195 | #[repr (u8)] |
2196 | #[derive (Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] |
2197 | #[cfg_attr (feature = "defmt" , derive(defmt::Format))] |
2198 | pub enum Trigpol { |
2199 | #[doc = "no trigger (masked trigger event)" ] |
2200 | NONE = 0x0, |
2201 | #[doc = "trigger on the rising edge" ] |
2202 | RISING_EDGE = 0x01, |
2203 | #[doc = "trigger on the falling edge" ] |
2204 | FALLING_EDGE = 0x02, |
2205 | #[doc = "same as 00" ] |
2206 | NONE_ALT = 0x03, |
2207 | } |
2208 | impl Trigpol { |
2209 | #[inline (always)] |
2210 | pub const fn from_bits(val: u8) -> Trigpol { |
2211 | unsafe { core::mem::transmute(val & 0x03) } |
2212 | } |
2213 | #[inline (always)] |
2214 | pub const fn to_bits(self) -> u8 { |
2215 | unsafe { core::mem::transmute(self) } |
2216 | } |
2217 | } |
2218 | impl From<u8> for Trigpol { |
2219 | #[inline (always)] |
2220 | fn from(val: u8) -> Trigpol { |
2221 | Trigpol::from_bits(val) |
2222 | } |
2223 | } |
2224 | impl From<Trigpol> for u8 { |
2225 | #[inline (always)] |
2226 | fn from(val: Trigpol) -> u8 { |
2227 | Trigpol::to_bits(val) |
2228 | } |
2229 | } |
2230 | } |
2231 | |