1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * STM32 DMA3 controller driver |
4 | * |
5 | * Copyright (C) STMicroelectronics 2024 |
6 | * Author(s): Amelie Delaunay <amelie.delaunay@foss.st.com> |
7 | */ |
8 | |
9 | #include <linux/bitfield.h> |
10 | #include <linux/clk.h> |
11 | #include <linux/dma-mapping.h> |
12 | #include <linux/dmaengine.h> |
13 | #include <linux/dmapool.h> |
14 | #include <linux/init.h> |
15 | #include <linux/iopoll.h> |
16 | #include <linux/list.h> |
17 | #include <linux/module.h> |
18 | #include <linux/of_dma.h> |
19 | #include <linux/platform_device.h> |
20 | #include <linux/pm_runtime.h> |
21 | #include <linux/reset.h> |
22 | #include <linux/slab.h> |
23 | |
24 | #include "../virt-dma.h" |
25 | |
26 | #define STM32_DMA3_SECCFGR 0x00 |
27 | #define STM32_DMA3_PRIVCFGR 0x04 |
28 | #define STM32_DMA3_RCFGLOCKR 0x08 |
29 | #define STM32_DMA3_MISR 0x0c |
30 | #define STM32_DMA3_SMISR 0x10 |
31 | |
32 | #define STM32_DMA3_CLBAR(x) (0x50 + 0x80 * (x)) |
33 | #define STM32_DMA3_CCIDCFGR(x) (0x54 + 0x80 * (x)) |
34 | #define STM32_DMA3_CSEMCR(x) (0x58 + 0x80 * (x)) |
35 | #define STM32_DMA3_CFCR(x) (0x5c + 0x80 * (x)) |
36 | #define STM32_DMA3_CSR(x) (0x60 + 0x80 * (x)) |
37 | #define STM32_DMA3_CCR(x) (0x64 + 0x80 * (x)) |
38 | #define STM32_DMA3_CTR1(x) (0x90 + 0x80 * (x)) |
39 | #define STM32_DMA3_CTR2(x) (0x94 + 0x80 * (x)) |
40 | #define STM32_DMA3_CBR1(x) (0x98 + 0x80 * (x)) |
41 | #define STM32_DMA3_CSAR(x) (0x9c + 0x80 * (x)) |
42 | #define STM32_DMA3_CDAR(x) (0xa0 + 0x80 * (x)) |
43 | #define STM32_DMA3_CLLR(x) (0xcc + 0x80 * (x)) |
44 | |
45 | #define STM32_DMA3_HWCFGR13 0xfc0 /* G_PER_CTRL(X) x=8..15 */ |
46 | #define STM32_DMA3_HWCFGR12 0xfc4 /* G_PER_CTRL(X) x=0..7 */ |
47 | #define STM32_DMA3_HWCFGR4 0xfe4 /* G_FIFO_SIZE(X) x=8..15 */ |
48 | #define STM32_DMA3_HWCFGR3 0xfe8 /* G_FIFO_SIZE(X) x=0..7 */ |
49 | #define STM32_DMA3_HWCFGR2 0xfec /* G_MAX_REQ_ID */ |
50 | #define STM32_DMA3_HWCFGR1 0xff0 /* G_MASTER_PORTS, G_NUM_CHANNELS, G_Mx_DATA_WIDTH */ |
51 | #define STM32_DMA3_VERR 0xff4 |
52 | |
53 | /* SECCFGR DMA secure configuration register */ |
54 | #define SECCFGR_SEC(x) BIT(x) |
55 | |
56 | /* MISR DMA non-secure/secure masked interrupt status register */ |
57 | #define MISR_MIS(x) BIT(x) |
58 | |
59 | /* CxLBAR DMA channel x linked_list base address register */ |
60 | #define CLBAR_LBA GENMASK(31, 16) |
61 | |
62 | /* CxCIDCFGR DMA channel x CID register */ |
63 | #define CCIDCFGR_CFEN BIT(0) |
64 | #define CCIDCFGR_SEM_EN BIT(1) |
65 | #define CCIDCFGR_SCID GENMASK(5, 4) |
66 | #define CCIDCFGR_SEM_WLIST_CID0 BIT(16) |
67 | #define CCIDCFGR_SEM_WLIST_CID1 BIT(17) |
68 | #define CCIDCFGR_SEM_WLIST_CID2 BIT(18) |
69 | |
70 | enum ccidcfgr_cid { |
71 | CCIDCFGR_CID0, |
72 | CCIDCFGR_CID1, |
73 | CCIDCFGR_CID2, |
74 | }; |
75 | |
76 | /* CxSEMCR DMA channel x semaphore control register */ |
77 | #define CSEMCR_SEM_MUTEX BIT(0) |
78 | #define CSEMCR_SEM_CCID GENMASK(5, 4) |
79 | |
80 | /* CxFCR DMA channel x flag clear register */ |
81 | #define CFCR_TCF BIT(8) |
82 | #define CFCR_HTF BIT(9) |
83 | #define CFCR_DTEF BIT(10) |
84 | #define CFCR_ULEF BIT(11) |
85 | #define CFCR_USEF BIT(12) |
86 | #define CFCR_SUSPF BIT(13) |
87 | |
88 | /* CxSR DMA channel x status register */ |
89 | #define CSR_IDLEF BIT(0) |
90 | #define CSR_TCF BIT(8) |
91 | #define CSR_HTF BIT(9) |
92 | #define CSR_DTEF BIT(10) |
93 | #define CSR_ULEF BIT(11) |
94 | #define CSR_USEF BIT(12) |
95 | #define CSR_SUSPF BIT(13) |
96 | #define CSR_ALL_F GENMASK(13, 8) |
97 | #define CSR_FIFOL GENMASK(24, 16) |
98 | |
99 | /* CxCR DMA channel x control register */ |
100 | #define CCR_EN BIT(0) |
101 | #define CCR_RESET BIT(1) |
102 | #define CCR_SUSP BIT(2) |
103 | #define CCR_TCIE BIT(8) |
104 | #define CCR_HTIE BIT(9) |
105 | #define CCR_DTEIE BIT(10) |
106 | #define CCR_ULEIE BIT(11) |
107 | #define CCR_USEIE BIT(12) |
108 | #define CCR_SUSPIE BIT(13) |
109 | #define CCR_ALLIE GENMASK(13, 8) |
110 | #define CCR_LSM BIT(16) |
111 | #define CCR_LAP BIT(17) |
112 | #define CCR_PRIO GENMASK(23, 22) |
113 | |
114 | enum ccr_prio { |
115 | CCR_PRIO_LOW, |
116 | CCR_PRIO_MID, |
117 | CCR_PRIO_HIGH, |
118 | CCR_PRIO_VERY_HIGH, |
119 | }; |
120 | |
121 | /* CxTR1 DMA channel x transfer register 1 */ |
122 | #define CTR1_SINC BIT(3) |
123 | #define CTR1_SBL_1 GENMASK(9, 4) |
124 | #define CTR1_DINC BIT(19) |
125 | #define CTR1_DBL_1 GENMASK(25, 20) |
126 | #define CTR1_SDW_LOG2 GENMASK(1, 0) |
127 | #define CTR1_PAM GENMASK(12, 11) |
128 | #define CTR1_SAP BIT(14) |
129 | #define CTR1_DDW_LOG2 GENMASK(17, 16) |
130 | #define CTR1_DAP BIT(30) |
131 | |
132 | enum ctr1_dw { |
133 | CTR1_DW_BYTE, |
134 | CTR1_DW_HWORD, |
135 | CTR1_DW_WORD, |
136 | CTR1_DW_DWORD, /* Depends on HWCFGR1.G_M0_DATA_WIDTH_ENC and .G_M1_DATA_WIDTH_ENC */ |
137 | }; |
138 | |
139 | enum ctr1_pam { |
140 | CTR1_PAM_0S_LT, /* if DDW > SDW, padded with 0s else left-truncated */ |
141 | CTR1_PAM_SE_RT, /* if DDW > SDW, sign extended else right-truncated */ |
142 | CTR1_PAM_PACK_UNPACK, /* FIFO queued */ |
143 | }; |
144 | |
145 | /* CxTR2 DMA channel x transfer register 2 */ |
146 | #define CTR2_REQSEL GENMASK(7, 0) |
147 | #define CTR2_SWREQ BIT(9) |
148 | #define CTR2_DREQ BIT(10) |
149 | #define CTR2_BREQ BIT(11) |
150 | #define CTR2_PFREQ BIT(12) |
151 | #define CTR2_TCEM GENMASK(31, 30) |
152 | |
153 | enum ctr2_tcem { |
154 | CTR2_TCEM_BLOCK, |
155 | CTR2_TCEM_REPEAT_BLOCK, |
156 | CTR2_TCEM_LLI, |
157 | CTR2_TCEM_CHANNEL, |
158 | }; |
159 | |
160 | /* CxBR1 DMA channel x block register 1 */ |
161 | #define CBR1_BNDT GENMASK(15, 0) |
162 | |
163 | /* CxLLR DMA channel x linked-list address register */ |
164 | #define CLLR_LA GENMASK(15, 2) |
165 | #define CLLR_ULL BIT(16) |
166 | #define CLLR_UDA BIT(27) |
167 | #define CLLR_USA BIT(28) |
168 | #define CLLR_UB1 BIT(29) |
169 | #define CLLR_UT2 BIT(30) |
170 | #define CLLR_UT1 BIT(31) |
171 | |
172 | /* HWCFGR13 DMA hardware configuration register 13 x=8..15 */ |
173 | /* HWCFGR12 DMA hardware configuration register 12 x=0..7 */ |
174 | #define G_PER_CTRL(x) (ULL(0x1) << (4 * (x))) |
175 | |
176 | /* HWCFGR4 DMA hardware configuration register 4 x=8..15 */ |
177 | /* HWCFGR3 DMA hardware configuration register 3 x=0..7 */ |
178 | #define G_FIFO_SIZE(x) (ULL(0x7) << (4 * (x))) |
179 | |
180 | #define get_chan_hwcfg(x, mask, reg) (((reg) & (mask)) >> (4 * (x))) |
181 | |
182 | /* HWCFGR2 DMA hardware configuration register 2 */ |
183 | #define G_MAX_REQ_ID GENMASK(7, 0) |
184 | |
185 | /* HWCFGR1 DMA hardware configuration register 1 */ |
186 | #define G_MASTER_PORTS GENMASK(2, 0) |
187 | #define G_NUM_CHANNELS GENMASK(12, 8) |
188 | #define G_M0_DATA_WIDTH_ENC GENMASK(25, 24) |
189 | #define G_M1_DATA_WIDTH_ENC GENMASK(29, 28) |
190 | |
191 | enum stm32_dma3_master_ports { |
192 | AXI64, /* 1x AXI: 64-bit port 0 */ |
193 | AHB32, /* 1x AHB: 32-bit port 0 */ |
194 | AHB32_AHB32, /* 2x AHB: 32-bit port 0 and 32-bit port 1 */ |
195 | AXI64_AHB32, /* 1x AXI 64-bit port 0 and 1x AHB 32-bit port 1 */ |
196 | AXI64_AXI64, /* 2x AXI: 64-bit port 0 and 64-bit port 1 */ |
197 | AXI128_AHB32, /* 1x AXI 128-bit port 0 and 1x AHB 32-bit port 1 */ |
198 | }; |
199 | |
200 | enum stm32_dma3_port_data_width { |
201 | DW_32, /* 32-bit, for AHB */ |
202 | DW_64, /* 64-bit, for AXI */ |
203 | DW_128, /* 128-bit, for AXI */ |
204 | DW_INVALID, |
205 | }; |
206 | |
207 | /* VERR DMA version register */ |
208 | #define VERR_MINREV GENMASK(3, 0) |
209 | #define VERR_MAJREV GENMASK(7, 4) |
210 | |
211 | /* Device tree */ |
212 | /* struct stm32_dma3_dt_conf */ |
213 | /* .ch_conf */ |
214 | #define STM32_DMA3_DT_PRIO GENMASK(1, 0) /* CCR_PRIO */ |
215 | #define STM32_DMA3_DT_FIFO GENMASK(7, 4) |
216 | /* .tr_conf */ |
217 | #define STM32_DMA3_DT_SINC BIT(0) /* CTR1_SINC */ |
218 | #define STM32_DMA3_DT_SAP BIT(1) /* CTR1_SAP */ |
219 | #define STM32_DMA3_DT_DINC BIT(4) /* CTR1_DINC */ |
220 | #define STM32_DMA3_DT_DAP BIT(5) /* CTR1_DAP */ |
221 | #define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */ |
222 | #define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */ |
223 | #define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */ |
224 | #define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */ |
225 | #define STM32_DMA3_DT_NOREFACT BIT(17) |
226 | |
227 | /* struct stm32_dma3_chan .config_set bitfield */ |
228 | #define STM32_DMA3_CFG_SET_DT BIT(0) |
229 | #define STM32_DMA3_CFG_SET_DMA BIT(1) |
230 | #define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA) |
231 | |
232 | #define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64) |
233 | #define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \ |
234 | FIELD_MAX(CTR1_DBL_1))) |
235 | #define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ |
236 | ((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); }) |
237 | #define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \ |
238 | ((_maxdw) != DW_INVALID) && ((_maxdw) != DW_32); }) |
239 | #define get_chan_max_dw(maxdw, maxburst)((port_is_ahb(maxdw) || \ |
240 | (maxburst) < DMA_SLAVE_BUSWIDTH_8_BYTES) ? \ |
241 | DMA_SLAVE_BUSWIDTH_4_BYTES : DMA_SLAVE_BUSWIDTH_8_BYTES) |
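/*
 * Illustrative evaluation of the limits above (numbers follow from the field masks,
 * given here only as a worked example, not as additional definitions):
 * - STM32_DMA3_MAX_BLOCK_SIZE = ALIGN_DOWN(0xffff, 64) = 0xffc0 = 65472 bytes
 * - STM32_DMA3_MAX_BURST_LEN  = 1 + min(63, 63) = 64 beats (SBL_1/DBL_1 are 6-bit fields)
 */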
242 | |
243 | /* Static linked-list data structure (depends on update bits UT1/UT2/UB1/USA/UDA/ULL) */ |
244 | struct stm32_dma3_hwdesc { |
245 | u32 ctr1; |
246 | u32 ctr2; |
247 | u32 cbr1; |
248 | u32 csar; |
249 | u32 cdar; |
250 | u32 cllr; |
251 | } __packed __aligned(32); |
252 | |
253 | /* |
254 | * CLLR_LA / sizeof(struct stm32_dma3_hwdesc) represents the number of hwdesc that can be addressed |
255 | * by the pointer to the next linked-list data structure. The __aligned attribute forces the |
256 | * 32-byte alignment, so use the hardcoded 32. Multiplied by the max block size of each item, it |
257 | * gives the sg size limitation. |
258 | */ |
259 | #define STM32_DMA3_MAX_SEG_SIZE ((CLLR_LA / 32) * STM32_DMA3_MAX_BLOCK_SIZE) |
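/*
 * Worked example (illustrative only, the values follow from the masks above):
 * CLLR_LA = GENMASK(15, 2) = 0xfffc, so 0xfffc / 32 = 2047 linked-list items can be addressed,
 * and 2047 * STM32_DMA3_MAX_BLOCK_SIZE = 2047 * 65472 = 134021184 bytes (~128 MiB) per segment.
 */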
260 | |
261 | /* |
262 | * Linked-list items |
263 | */ |
264 | struct stm32_dma3_lli { |
265 | struct stm32_dma3_hwdesc *hwdesc; |
266 | dma_addr_t hwdesc_addr; |
267 | }; |
268 | |
269 | struct stm32_dma3_swdesc { |
270 | struct virt_dma_desc vdesc; |
271 | u32 ccr; |
272 | bool cyclic; |
273 | u32 lli_size; |
274 | struct stm32_dma3_lli lli[] __counted_by(lli_size); |
275 | }; |
276 | |
277 | struct stm32_dma3_dt_conf { |
278 | u32 ch_id; |
279 | u32 req_line; |
280 | u32 ch_conf; |
281 | u32 tr_conf; |
282 | }; |
283 | |
284 | struct stm32_dma3_chan { |
285 | struct virt_dma_chan vchan; |
286 | u32 id; |
287 | int irq; |
288 | u32 fifo_size; |
289 | u32 max_burst; |
290 | bool semaphore_mode; |
291 | struct stm32_dma3_dt_conf dt_config; |
292 | struct dma_slave_config dma_config; |
293 | u8 config_set; |
294 | struct dma_pool *lli_pool; |
295 | struct stm32_dma3_swdesc *swdesc; |
296 | enum ctr2_tcem tcem; |
297 | u32 dma_status; |
298 | }; |
299 | |
300 | struct stm32_dma3_pdata { |
301 | u32 axi_max_burst_len; |
302 | }; |
303 | |
304 | struct stm32_dma3_ddata { |
305 | struct dma_device dma_dev; |
306 | void __iomem *base; |
307 | struct clk *clk; |
308 | struct stm32_dma3_chan *chans; |
309 | u32 dma_channels; |
310 | u32 dma_requests; |
311 | enum stm32_dma3_port_data_width ports_max_dw[2]; |
312 | u32 axi_max_burst_len; |
313 | }; |
314 | |
315 | static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan) |
316 | { |
317 | return container_of(chan->vchan.chan.device, struct stm32_dma3_ddata, dma_dev); |
318 | } |
319 | |
320 | static inline struct stm32_dma3_chan *to_stm32_dma3_chan(struct dma_chan *c) |
321 | { |
322 | return container_of(c, struct stm32_dma3_chan, vchan.chan); |
323 | } |
324 | |
325 | static inline struct stm32_dma3_swdesc *to_stm32_dma3_swdesc(struct virt_dma_desc *vdesc) |
326 | { |
327 | return container_of(vdesc, struct stm32_dma3_swdesc, vdesc); |
328 | } |
329 | |
330 | static struct device *chan2dev(struct stm32_dma3_chan *chan) |
331 | { |
332 | return &chan->vchan.chan.dev->device; |
333 | } |
334 | |
335 | static void stm32_dma3_chan_dump_reg(struct stm32_dma3_chan *chan) |
336 | { |
337 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
338 | struct device *dev = chan2dev(chan); |
339 | u32 id = chan->id, offset; |
340 | |
341 | offset = STM32_DMA3_SECCFGR; |
342 | dev_dbg(dev, "SECCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset)); |
343 | offset = STM32_DMA3_PRIVCFGR; |
344 | dev_dbg(dev, "PRIVCFGR(0x%03x): %08x\n", offset, readl_relaxed(ddata->base + offset)); |
345 | offset = STM32_DMA3_CCIDCFGR(id); |
346 | dev_dbg(dev, "C%dCIDCFGR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
347 | offset = STM32_DMA3_CSEMCR(id); |
348 | dev_dbg(dev, "C%dSEMCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
349 | offset = STM32_DMA3_CSR(id); |
350 | dev_dbg(dev, "C%dSR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
351 | offset = STM32_DMA3_CCR(id); |
352 | dev_dbg(dev, "C%dCR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
353 | offset = STM32_DMA3_CTR1(id); |
354 | dev_dbg(dev, "C%dTR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
355 | offset = STM32_DMA3_CTR2(id); |
356 | dev_dbg(dev, "C%dTR2(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
357 | offset = STM32_DMA3_CBR1(id); |
358 | dev_dbg(dev, "C%dBR1(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
359 | offset = STM32_DMA3_CSAR(id); |
360 | dev_dbg(dev, "C%dSAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
361 | offset = STM32_DMA3_CDAR(id); |
362 | dev_dbg(dev, "C%dDAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
363 | offset = STM32_DMA3_CLLR(id); |
364 | dev_dbg(dev, "C%dLLR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
365 | offset = STM32_DMA3_CLBAR(id); |
366 | dev_dbg(dev, "C%dLBAR(0x%03x): %08x\n", id, offset, readl_relaxed(ddata->base + offset)); |
367 | } |
368 | |
369 | static void stm32_dma3_chan_dump_hwdesc(struct stm32_dma3_chan *chan, |
370 | struct stm32_dma3_swdesc *swdesc) |
371 | { |
372 | struct stm32_dma3_hwdesc *hwdesc; |
373 | int i; |
374 | |
375 | for (i = 0; i < swdesc->lli_size; i++) { |
376 | hwdesc = swdesc->lli[i].hwdesc; |
377 | if (i) |
378 | dev_dbg(chan2dev(chan), "V\n"); |
379 | dev_dbg(chan2dev(chan), "[%d]@%pad\n", i, &swdesc->lli[i].hwdesc_addr); |
380 | dev_dbg(chan2dev(chan), "| C%dTR1: %08x\n", chan->id, hwdesc->ctr1); |
381 | dev_dbg(chan2dev(chan), "| C%dTR2: %08x\n", chan->id, hwdesc->ctr2); |
382 | dev_dbg(chan2dev(chan), "| C%dBR1: %08x\n", chan->id, hwdesc->cbr1); |
383 | dev_dbg(chan2dev(chan), "| C%dSAR: %08x\n", chan->id, hwdesc->csar); |
384 | dev_dbg(chan2dev(chan), "| C%dDAR: %08x\n", chan->id, hwdesc->cdar); |
385 | dev_dbg(chan2dev(chan), "| C%dLLR: %08x\n", chan->id, hwdesc->cllr); |
386 | } |
387 | |
388 | if (swdesc->cyclic) { |
389 | dev_dbg(chan2dev(chan), "|\n"); |
390 | dev_dbg(chan2dev(chan), "-->[0]@%pad\n", &swdesc->lli[0].hwdesc_addr); |
391 | } else { |
392 | dev_dbg(chan2dev(chan), "X\n"); |
393 | } |
394 | } |
395 | |
396 | static struct stm32_dma3_swdesc *stm32_dma3_chan_desc_alloc(struct stm32_dma3_chan *chan, u32 count) |
397 | { |
398 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
399 | struct stm32_dma3_swdesc *swdesc; |
400 | int i; |
401 | |
402 | /* |
403 | * If the memory to be allocated for the number of hwdesc (6 u32 members, but 32-byte |
404 | * aligned) is greater than the maximum address CLLR_LA can hold, then the last items can't be |
405 | * addressed, so abort the allocation. |
406 | */ |
407 | if ((count * 32) > CLLR_LA) { |
408 | dev_err(chan2dev(chan), "Transfer is too big (> %luB)\n", STM32_DMA3_MAX_SEG_SIZE); |
409 | return NULL; |
410 | } |
411 | |
412 | swdesc = kzalloc(struct_size(swdesc, lli, count), GFP_NOWAIT); |
413 | if (!swdesc) |
414 | return NULL; |
415 | swdesc->lli_size = count; |
416 | |
417 | for (i = 0; i < count; i++) { |
418 | swdesc->lli[i].hwdesc = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, |
419 | &swdesc->lli[i].hwdesc_addr); |
420 | if (!swdesc->lli[i].hwdesc) |
421 | goto err_pool_free; |
422 | } |
423 | swdesc->ccr = 0; |
424 | |
425 | /* Set LL base address */ |
426 | writel_relaxed(swdesc->lli[0].hwdesc_addr & CLBAR_LBA, |
427 | ddata->base + STM32_DMA3_CLBAR(chan->id)); |
428 | |
429 | /* Set LL allocated port */ |
430 | swdesc->ccr &= ~CCR_LAP; |
431 | |
432 | return swdesc; |
433 | |
434 | err_pool_free: |
435 | dev_err(chan2dev(chan), "Failed to alloc descriptors\n"); |
436 | while (--i >= 0) |
437 | dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr); |
438 | kfree(swdesc); |
439 | |
440 | return NULL; |
441 | } |
442 | |
443 | static void stm32_dma3_chan_desc_free(struct stm32_dma3_chan *chan, |
444 | struct stm32_dma3_swdesc *swdesc) |
445 | { |
446 | int i; |
447 | |
448 | for (i = 0; i < swdesc->lli_size; i++) |
449 | dma_pool_free(chan->lli_pool, swdesc->lli[i].hwdesc, swdesc->lli[i].hwdesc_addr); |
450 | |
451 | kfree(swdesc); |
452 | } |
453 | |
454 | static void stm32_dma3_chan_vdesc_free(struct virt_dma_desc *vdesc) |
455 | { |
456 | struct stm32_dma3_swdesc *swdesc = to_stm32_dma3_swdesc(vdesc); |
457 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(vdesc->tx.chan); |
458 | |
459 | stm32_dma3_chan_desc_free(chan, swdesc); |
460 | } |
461 | |
462 | static void stm32_dma3_check_user_setting(struct stm32_dma3_chan *chan) |
463 | { |
464 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
465 | struct device *dev = chan2dev(chan); |
466 | u32 ctr1 = readl_relaxed(ddata->base + STM32_DMA3_CTR1(chan->id)); |
467 | u32 cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id)); |
468 | u32 csar = readl_relaxed(ddata->base + STM32_DMA3_CSAR(chan->id)); |
469 | u32 cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id)); |
470 | u32 cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id)); |
471 | u32 bndt = FIELD_GET(CBR1_BNDT, cbr1); |
472 | u32 sdw = 1 << FIELD_GET(CTR1_SDW_LOG2, ctr1); |
473 | u32 ddw = 1 << FIELD_GET(CTR1_DDW_LOG2, ctr1); |
474 | u32 sap = FIELD_GET(CTR1_SAP, ctr1); |
475 | u32 dap = FIELD_GET(CTR1_DAP, ctr1); |
476 | |
477 | if (!bndt && !FIELD_GET(CLLR_UB1, cllr)) |
478 | dev_err(dev, "null source block size and no update of this value\n"); |
479 | if (bndt % sdw) |
480 | dev_err(dev, "source block size not multiple of src data width\n"); |
481 | if (FIELD_GET(CTR1_PAM, ctr1) == CTR1_PAM_PACK_UNPACK && bndt % ddw) |
482 | dev_err(dev, "(un)packing mode w/ src block size not multiple of dst data width\n"); |
483 | if (csar % sdw) |
484 | dev_err(dev, "unaligned source address not multiple of src data width\n"); |
485 | if (cdar % ddw) |
486 | dev_err(dev, "unaligned destination address not multiple of dst data width\n"); |
487 | if (sdw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[sap])) |
488 | dev_err(dev, "double-word source data width not supported on port %u\n", sap); |
489 | if (ddw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[dap])) |
490 | dev_err(dev, "double-word destination data width not supported on port %u\n", dap); |
491 | } |
492 | |
493 | static void stm32_dma3_chan_prep_hwdesc(struct stm32_dma3_chan *chan, |
494 | struct stm32_dma3_swdesc *swdesc, |
495 | u32 curr, dma_addr_t src, dma_addr_t dst, u32 len, |
496 | u32 ctr1, u32 ctr2, bool is_last, bool is_cyclic) |
497 | { |
498 | struct stm32_dma3_hwdesc *hwdesc; |
499 | dma_addr_t next_lli; |
500 | u32 next = curr + 1; |
501 | |
502 | hwdesc = swdesc->lli[curr].hwdesc; |
503 | hwdesc->ctr1 = ctr1; |
504 | hwdesc->ctr2 = ctr2; |
505 | hwdesc->cbr1 = FIELD_PREP(CBR1_BNDT, len); |
506 | hwdesc->csar = src; |
507 | hwdesc->cdar = dst; |
508 | |
509 | if (is_last) { |
510 | if (is_cyclic) |
511 | next_lli = swdesc->lli[0].hwdesc_addr; |
512 | else |
513 | next_lli = 0; |
514 | } else { |
515 | next_lli = swdesc->lli[next].hwdesc_addr; |
516 | } |
517 | |
518 | hwdesc->cllr = 0; |
519 | if (next_lli) { |
520 | hwdesc->cllr |= CLLR_UT1 | CLLR_UT2 | CLLR_UB1; |
521 | hwdesc->cllr |= CLLR_USA | CLLR_UDA | CLLR_ULL; |
522 | hwdesc->cllr |= (next_lli & CLLR_LA); |
523 | } |
524 | |
525 | /* |
526 | * Make sure to flush the CPU's write buffers so that the descriptors are ready to be read |
527 | * by DMA3. By explicitly using a write memory barrier here, instead of doing it with writel |
528 | * to enable the channel, we avoid an unnecessary barrier in the case where the descriptors |
529 | * are reused (DMA_CTRL_REUSE). |
530 | */ |
531 | if (is_last) |
532 | dma_wmb(); |
533 | } |
534 | |
535 | static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst, |
536 | enum stm32_dma3_port_data_width port_max_dw, |
537 | u32 len, dma_addr_t addr) |
538 | { |
539 | enum dma_slave_buswidth max_dw = get_chan_max_dw(port_max_dw, chan_max_burst); |
540 | |
541 | /* len and addr must be a multiple of dw */ |
542 | return 1 << __ffs(len | addr | max_dw); |
543 | } |
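/*
 * Example with made-up values: on an AXI port with max_dw = 8 bytes, a transfer of
 * len = 1024 bytes from addr = 0xc0000004 gives __ffs(1024 | 0xc0000004 | 8) = 2,
 * i.e. the unaligned address caps the usable data width at 1 << 2 = 4 bytes.
 */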
544 | |
545 | static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, |
546 | u32 chan_max_burst, u32 bus_max_burst) |
547 | { |
548 | u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1; |
549 | |
550 | /* len is a multiple of dw, so if len is < chan_max_burst, shorten burst */ |
551 | if (len < chan_max_burst) |
552 | max_burst = len / dw; |
553 | |
554 | /* |
555 | * HW doesn't modify the burst if burst size <= half of the fifo size. |
556 | * If len is not a multiple of burst size, last burst is shortened by HW. |
557 | * Take care of maximum burst supported on interconnect bus. |
558 | */ |
559 | return min_t(u32, max_burst, bus_max_burst); |
560 | } |
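/*
 * Example with made-up values: chan_max_burst = 64 bytes (half of a 128-byte FIFO) and
 * dw = 4 bytes give max_burst = 16 beats; with len = 32 bytes it is shortened to 8 beats,
 * and the result is finally clamped to bus_max_burst.
 */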
561 | |
562 | static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir, |
563 | u32 *ccr, u32 *ctr1, u32 *ctr2, |
564 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 len) |
565 | { |
566 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
567 | struct dma_device dma_device = ddata->dma_dev; |
568 | u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN; |
569 | u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max; |
570 | u32 _ctr1 = 0, _ctr2 = 0; |
571 | u32 ch_conf = chan->dt_config.ch_conf; |
572 | u32 tr_conf = chan->dt_config.tr_conf; |
573 | u32 sap = FIELD_GET(STM32_DMA3_DT_SAP, tr_conf), sap_max_dw; |
574 | u32 dap = FIELD_GET(STM32_DMA3_DT_DAP, tr_conf), dap_max_dw; |
575 | |
576 | dev_dbg(chan2dev(chan), "%s from %pad to %pad\n", |
577 | dmaengine_get_direction_text(dir), &src_addr, &dst_addr); |
578 | |
579 | sdw = chan->dma_config.src_addr_width ? : get_chan_max_dw(sap, chan->max_burst); |
580 | ddw = chan->dma_config.dst_addr_width ? : get_chan_max_dw(dap, chan->max_burst); |
581 | sbl_max = chan->dma_config.src_maxburst ? : 1; |
582 | dbl_max = chan->dma_config.dst_maxburst ? : 1; |
583 | |
584 | /* Following conditions would raise User Setting Error interrupt */ |
585 | if (!(dma_device.src_addr_widths & BIT(sdw)) || !(dma_device.dst_addr_widths & BIT(ddw))) { |
586 | dev_err(chan2dev(chan), "Bus width (src=%u, dst=%u) not supported\n", sdw, ddw); |
587 | return -EINVAL; |
588 | } |
589 | |
590 | if (ddata->ports_max_dw[1] == DW_INVALID && (sap || dap)) { |
591 | dev_err(chan2dev(chan), "Only one master port, port 1 is not supported\n"); |
592 | return -EINVAL; |
593 | } |
594 | |
595 | sap_max_dw = ddata->ports_max_dw[sap]; |
596 | dap_max_dw = ddata->ports_max_dw[dap]; |
597 | if ((port_is_ahb(sap_max_dw) && sdw == DMA_SLAVE_BUSWIDTH_8_BYTES) || |
598 | (port_is_ahb(dap_max_dw) && ddw == DMA_SLAVE_BUSWIDTH_8_BYTES)) { |
599 | dev_err(chan2dev(chan), |
600 | "8 bytes buswidth (src=%u, dst=%u) not supported on port (sap=%u, dap=%u\n", |
601 | sdw, ddw, sap, dap); |
602 | return -EINVAL; |
603 | } |
604 | |
605 | if (FIELD_GET(STM32_DMA3_DT_SINC, tr_conf)) |
606 | _ctr1 |= CTR1_SINC; |
607 | if (sap) |
608 | _ctr1 |= CTR1_SAP; |
609 | if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */ |
610 | src_max_burst = ddata->axi_max_burst_len; |
611 | if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf)) |
612 | _ctr1 |= CTR1_DINC; |
613 | if (dap) |
614 | _ctr1 |= CTR1_DAP; |
615 | if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */ |
616 | dst_max_burst = ddata->axi_max_burst_len; |
617 | |
618 | _ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ; |
619 | if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf)) |
620 | _ctr2 |= CTR2_BREQ; |
621 | if (dir == DMA_DEV_TO_MEM && FIELD_GET(STM32_DMA3_DT_PFREQ, tr_conf)) |
622 | _ctr2 |= CTR2_PFREQ; |
623 | tcem = FIELD_GET(STM32_DMA3_DT_TCEM, tr_conf); |
624 | _ctr2 |= FIELD_PREP(CTR2_TCEM, tcem); |
625 | |
626 | /* Store TCEM to know on which event TC flag occurred */ |
627 | chan->tcem = tcem; |
628 | /* Store direction for residue computation */ |
629 | chan->dma_config.direction = dir; |
630 | |
631 | switch (dir) { |
632 | case DMA_MEM_TO_DEV: |
633 | /* Set destination (device) data width and burst */ |
634 | ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, |
635 | len, dst_addr)); |
636 | dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst, |
637 | dst_max_burst)); |
638 | |
639 | /* Set source (memory) data width and burst */ |
640 | sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); |
641 | sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); |
642 | if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) { |
643 | sdw = ddw; |
644 | sbl_max = dbl_max; |
645 | } |
646 | |
647 | _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); |
648 | _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); |
649 | _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw)); |
650 | _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1); |
651 | |
652 | if (ddw != sdw) { |
653 | _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK); |
654 | /* Should never reach this case as ddw is clamped down */ |
655 | if (len & (ddw - 1)) { |
656 | dev_err(chan2dev(chan), |
657 | "Packing mode is enabled and len is not multiple of ddw"); |
658 | return -EINVAL; |
659 | } |
660 | } |
661 | |
662 | /* dst = dev */ |
663 | _ctr2 |= CTR2_DREQ; |
664 | |
665 | break; |
666 | |
667 | case DMA_DEV_TO_MEM: |
668 | /* Set source (device) data width and burst */ |
669 | sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, |
670 | len, src_addr)); |
671 | sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst, |
672 | src_max_burst)); |
673 | |
674 | /* Set destination (memory) data width and burst */ |
675 | ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); |
676 | dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); |
677 | if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) || |
678 | ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */ |
679 | ddw = sdw; |
680 | dbl_max = sbl_max; |
681 | } |
682 | |
683 | _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); |
684 | _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); |
685 | _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw)); |
686 | _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1); |
687 | |
688 | if (ddw != sdw) { |
689 | _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK); |
690 | /* Should never reach this case as ddw is clamped down */ |
691 | if (len & (ddw - 1)) { |
692 | dev_err(chan2dev(chan), |
693 | "Packing mode is enabled and len is not multiple of ddw\n"); |
694 | return -EINVAL; |
695 | } |
696 | } |
697 | |
698 | /* dst = mem */ |
699 | _ctr2 &= ~CTR2_DREQ; |
700 | |
701 | break; |
702 | |
703 | case DMA_MEM_TO_MEM: |
704 | /* Set source (memory) data width and burst */ |
705 | init_dw = sdw; |
706 | init_bl_max = sbl_max; |
707 | sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr); |
708 | sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst); |
709 | if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { |
710 | sdw = min_t(u32, init_dw, sdw); |
711 | sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw, |
712 | chan->max_burst, |
713 | src_max_burst)); |
714 | } |
715 | |
716 | /* Set destination (memory) data width and burst */ |
717 | init_dw = ddw; |
718 | init_bl_max = dbl_max; |
719 | ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr); |
720 | dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst); |
721 | if (chan->config_set & STM32_DMA3_CFG_SET_DMA) { |
722 | ddw = min_t(u32, init_dw, ddw); |
723 | dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw, |
724 | chan->max_burst, |
725 | dst_max_burst)); |
726 | } |
727 | |
728 | _ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw)); |
729 | _ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1); |
730 | _ctr1 |= FIELD_PREP(CTR1_DDW_LOG2, ilog2(ddw)); |
731 | _ctr1 |= FIELD_PREP(CTR1_DBL_1, dbl_max - 1); |
732 | |
733 | if (ddw != sdw) { |
734 | _ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK); |
735 | /* Should never reach this case as ddw is clamped down */ |
736 | if (len & (ddw - 1)) { |
737 | dev_err(chan2dev(chan), |
738 | "Packing mode is enabled and len is not multiple of ddw"); |
739 | return -EINVAL; |
740 | } |
741 | } |
742 | |
743 | /* CTR2_REQSEL/DREQ/BREQ/PFREQ are ignored with CTR2_SWREQ=1 */ |
744 | _ctr2 |= CTR2_SWREQ; |
745 | |
746 | break; |
747 | |
748 | default: |
749 | dev_err(chan2dev(chan), "Direction %s not supported\n", |
750 | dmaengine_get_direction_text(dir)); |
751 | return -EINVAL; |
752 | } |
753 | |
754 | *ccr |= FIELD_PREP(CCR_PRIO, FIELD_GET(STM32_DMA3_DT_PRIO, ch_conf)); |
755 | *ctr1 = _ctr1; |
756 | *ctr2 = _ctr2; |
757 | |
758 | dev_dbg(chan2dev(chan), "%s: sdw=%u bytes sbl=%u beats ddw=%u bytes dbl=%u beats\n", |
759 | __func__, sdw, sbl_max, ddw, dbl_max); |
760 | |
761 | return 0; |
762 | } |
763 | |
764 | static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan) |
765 | { |
766 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
767 | struct virt_dma_desc *vdesc; |
768 | struct stm32_dma3_hwdesc *hwdesc; |
769 | u32 id = chan->id; |
770 | u32 csr, ccr; |
771 | |
772 | vdesc = vchan_next_desc(&chan->vchan); |
773 | if (!vdesc) { |
774 | chan->swdesc = NULL; |
775 | return; |
776 | } |
777 | list_del(&vdesc->node); |
778 | |
779 | chan->swdesc = to_stm32_dma3_swdesc(vdesc); |
780 | hwdesc = chan->swdesc->lli[0].hwdesc; |
781 | |
782 | stm32_dma3_chan_dump_hwdesc(chan, chan->swdesc); |
783 | |
784 | writel_relaxed(chan->swdesc->ccr, ddata->base + STM32_DMA3_CCR(id)); |
785 | writel_relaxed(hwdesc->ctr1, ddata->base + STM32_DMA3_CTR1(id)); |
786 | writel_relaxed(hwdesc->ctr2, ddata->base + STM32_DMA3_CTR2(id)); |
787 | writel_relaxed(hwdesc->cbr1, ddata->base + STM32_DMA3_CBR1(id)); |
788 | writel_relaxed(hwdesc->csar, ddata->base + STM32_DMA3_CSAR(id)); |
789 | writel_relaxed(hwdesc->cdar, ddata->base + STM32_DMA3_CDAR(id)); |
790 | writel_relaxed(hwdesc->cllr, ddata->base + STM32_DMA3_CLLR(id)); |
791 | |
792 | /* Clear any pending interrupts */ |
793 | csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(id)); |
794 | if (csr & CSR_ALL_F) |
795 | writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(id)); |
796 | |
797 | stm32_dma3_chan_dump_reg(chan); |
798 | |
799 | ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(id)); |
800 | writel_relaxed(ccr | CCR_EN, ddata->base + STM32_DMA3_CCR(id)); |
801 | |
802 | chan->dma_status = DMA_IN_PROGRESS; |
803 | |
804 | dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); |
805 | } |
806 | |
807 | static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp) |
808 | { |
809 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
810 | u32 csr, ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN; |
811 | int ret = 0; |
812 | |
813 | if (susp) |
814 | ccr |= CCR_SUSP; |
815 | else |
816 | ccr &= ~CCR_SUSP; |
817 | |
818 | writel_relaxed(ccr, ddata->base + STM32_DMA3_CCR(chan->id)); |
819 | |
820 | if (susp) { |
821 | ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr, |
822 | csr & CSR_SUSPF, 1, 10); |
823 | if (!ret) |
824 | writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id)); |
825 | |
826 | stm32_dma3_chan_dump_reg(chan); |
827 | } |
828 | |
829 | return ret; |
830 | } |
831 | |
832 | static void stm32_dma3_chan_reset(struct stm32_dma3_chan *chan) |
833 | { |
834 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
835 | u32 ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & ~CCR_EN; |
836 | |
837 | writel_relaxed(ccr | CCR_RESET, ddata->base + STM32_DMA3_CCR(chan->id)); |
838 | } |
839 | |
840 | static int stm32_dma3_chan_get_curr_hwdesc(struct stm32_dma3_swdesc *swdesc, u32 cllr, u32 *residue) |
841 | { |
842 | u32 i, lli_offset, next_lli_offset = cllr & CLLR_LA; |
843 | |
844 | /* If cllr is null, it means it is either the last or single item */ |
845 | if (!cllr) |
846 | return swdesc->lli_size - 1; |
847 | |
848 | /* In cyclic mode, go fast and first check we are not on the last item */ |
849 | if (swdesc->cyclic && next_lli_offset == (swdesc->lli[0].hwdesc_addr & CLLR_LA)) |
850 | return swdesc->lli_size - 1; |
851 | |
852 | /* As transfer is in progress, look backward from the last item */ |
853 | for (i = swdesc->lli_size - 1; i > 0; i--) { |
854 | *residue += FIELD_GET(CBR1_BNDT, swdesc->lli[i].hwdesc->cbr1); |
855 | lli_offset = swdesc->lli[i].hwdesc_addr & CLLR_LA; |
856 | if (lli_offset == next_lli_offset) |
857 | return i - 1; |
858 | } |
859 | |
860 | return -EINVAL; |
861 | } |
862 | |
863 | static void stm32_dma3_chan_set_residue(struct stm32_dma3_chan *chan, |
864 | struct stm32_dma3_swdesc *swdesc, |
865 | struct dma_tx_state *txstate) |
866 | { |
867 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
868 | struct device *dev = chan2dev(chan); |
869 | struct stm32_dma3_hwdesc *hwdesc; |
870 | u32 residue, curr_lli, csr, cdar, cbr1, cllr, bndt, fifol; |
871 | bool pack_unpack; |
872 | int ret; |
873 | |
874 | csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id)); |
875 | if (!(csr & CSR_IDLEF) && chan->dma_status != DMA_PAUSED) { |
876 | /* Suspend current transfer to read registers for a snapshot */ |
877 | writel_relaxed(swdesc->ccr | CCR_SUSP, ddata->base + STM32_DMA3_CCR(chan->id)); |
878 | ret = readl_relaxed_poll_timeout_atomic(ddata->base + STM32_DMA3_CSR(chan->id), csr, |
879 | csr & (CSR_SUSPF | CSR_IDLEF), 1, 10); |
880 | |
881 | if (ret || ((csr & CSR_TCF) && (csr & CSR_IDLEF))) { |
882 | writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id)); |
883 | writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id)); |
884 | if (ret) |
885 | dev_err(dev, "Channel suspension timeout, csr=%08x\n", csr); |
886 | } |
887 | } |
888 | |
889 | /* If channel is still active (CSR_IDLEF is not set), can't get a reliable residue */ |
890 | if (!(csr & CSR_IDLEF)) |
891 | dev_warn(dev, "Can't get residue: channel still active, csr=%08x\n", csr); |
892 | |
893 | /* |
894 | * If channel is not suspended, but Idle and Transfer Complete are set, |
895 | * linked-list is over, no residue |
896 | */ |
897 | if (!(csr & CSR_SUSPF) && (csr & CSR_TCF) && (csr & CSR_IDLEF)) |
898 | return; |
899 | |
900 | /* Read registers to have a snapshot */ |
901 | cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id)); |
902 | cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id)); |
903 | cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id)); |
904 | |
905 | /* Resume current transfer */ |
906 | if (csr & CSR_SUSPF) { |
907 | writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id)); |
908 | writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id)); |
909 | } |
910 | |
911 | /* Add current BNDT */ |
912 | bndt = FIELD_GET(CBR1_BNDT, cbr1); |
913 | residue = bndt; |
914 | |
915 | /* Get current hwdesc and cumulate residue of pending hwdesc BNDT */ |
916 | ret = stm32_dma3_chan_get_curr_hwdesc(swdesc, cllr, &residue); |
917 | if (ret < 0) { |
918 | dev_err(chan2dev(chan), "Can't get residue: current hwdesc not found\n"); |
919 | return; |
920 | } |
921 | curr_lli = ret; |
922 | |
923 | /* Read current FIFO level - in units of programmed destination data width */ |
924 | hwdesc = swdesc->lli[curr_lli].hwdesc; |
925 | fifol = FIELD_GET(CSR_FIFOL, csr) * (1 << FIELD_GET(CTR1_DDW_LOG2, hwdesc->ctr1)); |
926 | /* If the FIFO contains as many bytes as its size, it can't contain more */ |
927 | if (fifol == (1 << (chan->fifo_size + 1))) |
928 | goto skip_fifol_update; |
929 | |
930 | /* |
931 | * In case of PACKING (Destination burst length > Source burst length) or UNPACKING |
932 | * (Source burst length > Destination burst length), bytes could be pending in the FIFO |
933 | * (to be packed up to Destination burst length or unpacked into Destination burst length |
934 | * chunks). |
935 | * BNDT is not reliable, as it reflects the number of bytes read from the source but not the |
936 | * number of bytes written to the destination. |
937 | * FIFOL is also not sufficient, because it reflects the number of available write beats in |
938 | * units of Destination data width but not the bytes not yet packed or unpacked. |
939 | * In case of Destination increment DINC, it is possible to compute the number of bytes in |
940 | * the FIFO: |
941 | * fifol_in_bytes = bytes_read - bytes_written. |
942 | */ |
943 | pack_unpack = !!(FIELD_GET(CTR1_PAM, hwdesc->ctr1) == CTR1_PAM_PACK_UNPACK); |
944 | if (pack_unpack && (hwdesc->ctr1 & CTR1_DINC)) { |
945 | int bytes_read = FIELD_GET(CBR1_BNDT, hwdesc->cbr1) - bndt; |
946 | int bytes_written = cdar - hwdesc->cdar; |
947 | |
948 | if (bytes_read > 0) |
949 | fifol = bytes_read - bytes_written; |
950 | } |
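/*
 * Hypothetical numbers to illustrate the computation above: with CBR1.BNDT programmed
 * to 64 and the snapshot showing bndt = 34, bytes_read = 30; if CDAR has advanced by
 * 28 bytes since the start of the item, bytes_written = 28 and fifol = 2 bytes.
 */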
951 | |
952 | skip_fifol_update: |
953 | if (fifol) { |
954 | dev_dbg(chan2dev(chan), "%u byte(s) in the FIFO\n", fifol); |
955 | dma_set_in_flight_bytes(txstate, fifol); |
956 | /* |
957 | * Residue is already accurate for DMA_MEM_TO_DEV as BNDT reflects data read from |
958 | * the source memory buffer, so just need to add fifol to residue in case of |
959 | * DMA_DEV_TO_MEM transfer because these bytes are not yet written in destination |
960 | * memory buffer. |
961 | */ |
962 | if (chan->dma_config.direction == DMA_DEV_TO_MEM) |
963 | residue += fifol; |
964 | } |
965 | dma_set_residue(txstate, residue); |
966 | } |
967 | |
968 | static int stm32_dma3_chan_stop(struct stm32_dma3_chan *chan) |
969 | { |
970 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
971 | u32 ccr; |
972 | int ret = 0; |
973 | |
974 | chan->dma_status = DMA_COMPLETE; |
975 | |
976 | /* Disable interrupts */ |
977 | ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)); |
978 | writel_relaxed(ccr & ~(CCR_ALLIE | CCR_EN), ddata->base + STM32_DMA3_CCR(chan->id)); |
979 | |
980 | if (!(ccr & CCR_SUSP) && (ccr & CCR_EN)) { |
981 | /* Suspend the channel */ |
982 | ret = stm32_dma3_chan_suspend(chan, true); |
983 | if (ret) |
984 | dev_warn(chan2dev(chan), "%s: timeout, data might be lost\n", __func__); |
985 | } |
986 | |
987 | /* |
988 | * Reset the channel: this causes the reset of the FIFO and the reset of the channel |
989 | * internal state, the reset of CCR_EN and CCR_SUSP bits. |
990 | */ |
991 | stm32_dma3_chan_reset(chan); |
992 | |
993 | return ret; |
994 | } |
995 | |
996 | static void stm32_dma3_chan_complete(struct stm32_dma3_chan *chan) |
997 | { |
998 | if (!chan->swdesc) |
999 | return; |
1000 | |
1001 | vchan_cookie_complete(&chan->swdesc->vdesc); |
1002 | chan->swdesc = NULL; |
1003 | stm32_dma3_chan_start(chan); |
1004 | } |
1005 | |
1006 | static irqreturn_t stm32_dma3_chan_irq(int irq, void *devid) |
1007 | { |
1008 | struct stm32_dma3_chan *chan = devid; |
1009 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
1010 | u32 misr, csr, ccr; |
1011 | |
1012 | spin_lock(&chan->vchan.lock); |
1013 | |
1014 | misr = readl_relaxed(ddata->base + STM32_DMA3_MISR); |
1015 | if (!(misr & MISR_MIS(chan->id))) { |
1016 | spin_unlock(&chan->vchan.lock); |
1017 | return IRQ_NONE; |
1018 | } |
1019 | |
1020 | csr = readl_relaxed(ddata->base + STM32_DMA3_CSR(chan->id)); |
1021 | ccr = readl_relaxed(ddata->base + STM32_DMA3_CCR(chan->id)) & CCR_ALLIE; |
1022 | |
1023 | if (csr & CSR_TCF && ccr & CCR_TCIE) { |
1024 | if (chan->swdesc->cyclic) |
1025 | vchan_cyclic_callback(&chan->swdesc->vdesc); |
1026 | else |
1027 | stm32_dma3_chan_complete(chan); |
1028 | } |
1029 | |
1030 | if (csr & CSR_USEF && ccr & CCR_USEIE) { |
1031 | dev_err(chan2dev(chan), "User setting error\n"); |
1032 | chan->dma_status = DMA_ERROR; |
1033 | /* CCR.EN automatically cleared by HW */ |
1034 | stm32_dma3_check_user_setting(chan); |
1035 | stm32_dma3_chan_reset(chan); |
1036 | } |
1037 | |
1038 | if (csr & CSR_ULEF && ccr & CCR_ULEIE) { |
1039 | dev_err(chan2dev(chan), "Update link transfer error\n"); |
1040 | chan->dma_status = DMA_ERROR; |
1041 | /* CCR.EN automatically cleared by HW */ |
1042 | stm32_dma3_chan_reset(chan); |
1043 | } |
1044 | |
1045 | if (csr & CSR_DTEF && ccr & CCR_DTEIE) { |
1046 | dev_err(chan2dev(chan), "Data transfer error\n"); |
1047 | chan->dma_status = DMA_ERROR; |
1048 | /* CCR.EN automatically cleared by HW */ |
1049 | stm32_dma3_chan_reset(chan); |
1050 | } |
1051 | |
1052 | /* |
1053 | * The Half Transfer Interrupt may be disabled while the Half Transfer Flag is still set, |
1054 | * so make sure the HTF flag gets cleared along with the other flags. |
1055 | */ |
1056 | csr &= (ccr | CCR_HTIE); |
1057 | |
1058 | if (csr) |
1059 | writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(chan->id)); |
1060 | |
1061 | spin_unlock(&chan->vchan.lock); |
1062 | |
1063 | return IRQ_HANDLED; |
1064 | } |
1065 | |
1066 | static int stm32_dma3_alloc_chan_resources(struct dma_chan *c) |
1067 | { |
1068 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1069 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
1070 | u32 id = chan->id, csemcr, ccid; |
1071 | int ret; |
1072 | |
1073 | ret = pm_runtime_resume_and_get(ddata->dma_dev.dev); |
1074 | if (ret < 0) |
1075 | return ret; |
1076 | |
1077 | /* Ensure the channel is free */ |
1078 | if (chan->semaphore_mode && |
1079 | readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)) & CSEMCR_SEM_MUTEX) { |
1080 | ret = -EBUSY; |
1081 | goto err_put_sync; |
1082 | } |
1083 | |
1084 | chan->lli_pool = dmam_pool_create(dev_name(&c->dev->device), c->device->dev, |
1085 | sizeof(struct stm32_dma3_hwdesc), |
1086 | __alignof__(struct stm32_dma3_hwdesc), SZ_64K); |
1087 | if (!chan->lli_pool) { |
1088 | dev_err(chan2dev(chan), "Failed to create LLI pool\n"); |
1089 | ret = -ENOMEM; |
1090 | goto err_put_sync; |
1091 | } |
1092 | |
1093 | /* Take the channel semaphore */ |
1094 | if (chan->semaphore_mode) { |
1095 | writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(id)); |
1096 | csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(id)); |
1097 | ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr); |
1098 | /* Check that the semaphore has actually been taken by CID1 */ |
1099 | if (ccid != CCIDCFGR_CID1) { |
1100 | dev_err(chan2dev(chan), "Not under CID1 control (in-use by CID%d)\n", ccid); |
1101 | ret = -EPERM; |
1102 | goto err_pool_destroy; |
1103 | } |
1104 | dev_dbg(chan2dev(chan), "Under CID1 control (semcr=0x%08x)\n", csemcr); |
1105 | } |
1106 | |
1107 | return 0; |
1108 | |
1109 | err_pool_destroy: |
1110 | dmam_pool_destroy(chan->lli_pool); |
1111 | chan->lli_pool = NULL; |
1112 | |
1113 | err_put_sync: |
1114 | pm_runtime_put_sync(ddata->dma_dev.dev); |
1115 | |
1116 | return ret; |
1117 | } |
1118 | |
1119 | static void stm32_dma3_free_chan_resources(struct dma_chan *c) |
1120 | { |
1121 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1122 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
1123 | unsigned long flags; |
1124 | |
1125 | /* Ensure channel is in idle state */ |
1126 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1127 | stm32_dma3_chan_stop(chan); |
1128 | chan->swdesc = NULL; |
1129 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1130 | |
1131 | vchan_free_chan_resources(to_virt_chan(c)); |
1132 | |
1133 | dmam_pool_destroy(chan->lli_pool); |
1134 | chan->lli_pool = NULL; |
1135 | |
1136 | /* Release the channel semaphore */ |
1137 | if (chan->semaphore_mode) |
1138 | writel_relaxed(0, ddata->base + STM32_DMA3_CSEMCR(chan->id)); |
1139 | |
1140 | pm_runtime_put_sync(ddata->dma_dev.dev); |
1141 | |
1142 | /* Reset configuration */ |
1143 | memset(&chan->dt_config, 0, sizeof(chan->dt_config)); |
1144 | memset(&chan->dma_config, 0, sizeof(chan->dma_config)); |
1145 | chan->config_set = 0; |
1146 | } |
1147 | |
1148 | static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor) |
1149 | { |
1150 | u32 count; |
1151 | |
1152 | if (prevent_refactor) |
1153 | return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE); |
1154 | |
1155 | count = len / STM32_DMA3_MAX_BLOCK_SIZE; |
1156 | len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE; |
1157 | |
1158 | if (len >= chan->max_burst) { |
1159 | count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */ |
1160 | len -= (len / chan->max_burst) * chan->max_burst; |
1161 | } |
1162 | |
1163 | /* Unaligned remainder fits in one extra item */ |
1164 | if (len > 0) |
1165 | count += 1; |
1166 | |
1167 | return count; |
1168 | } |
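/*
 * Worked example with made-up values: for len = 65572 bytes with max_burst = 64,
 * prevent_refactor gives DIV_ROUND_UP(65572, 65472) = 2 items, whereas the refactored
 * path splits the transfer into 65472 + 64 + 36 bytes, i.e. 3 items.
 */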
1169 | |
1170 | static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan, |
1171 | dma_addr_t dst, dma_addr_t src) |
1172 | { |
1173 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
1174 | u32 dw = get_chan_max_dw(ddata->ports_max_dw[0], chan->max_burst); /* port 0 by default */ |
1175 | u32 burst = chan->max_burst / dw; |
1176 | |
1177 | /* Initialize dt_config if channel not pre-configured through DT */ |
1178 | if (!(chan->config_set & STM32_DMA3_CFG_SET_DT)) { |
1179 | chan->dt_config.ch_conf = FIELD_PREP(STM32_DMA3_DT_PRIO, CCR_PRIO_VERY_HIGH); |
1180 | chan->dt_config.ch_conf |= FIELD_PREP(STM32_DMA3_DT_FIFO, chan->fifo_size); |
1181 | chan->dt_config.tr_conf = STM32_DMA3_DT_SINC | STM32_DMA3_DT_DINC; |
1182 | chan->dt_config.tr_conf |= FIELD_PREP(STM32_DMA3_DT_TCEM, CTR2_TCEM_CHANNEL); |
1183 | } |
1184 | |
1185 | /* Initialize dma_config if dmaengine_slave_config() not used */ |
1186 | if (!(chan->config_set & STM32_DMA3_CFG_SET_DMA)) { |
1187 | chan->dma_config.src_addr_width = dw; |
1188 | chan->dma_config.dst_addr_width = dw; |
1189 | chan->dma_config.src_maxburst = burst; |
1190 | chan->dma_config.dst_maxburst = burst; |
1191 | chan->dma_config.src_addr = src; |
1192 | chan->dma_config.dst_addr = dst; |
1193 | } |
1194 | } |
1195 | |
1196 | static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_chan *c, |
1197 | dma_addr_t dst, dma_addr_t src, |
1198 | size_t len, unsigned long flags) |
1199 | { |
1200 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1201 | struct stm32_dma3_swdesc *swdesc; |
1202 | size_t next_size, offset; |
1203 | u32 count, i, ctr1, ctr2; |
1204 | bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || |
1205 | !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); |
1206 | |
1207 | count = stm32_dma3_get_ll_count(chan, len, prevent_refactor); |
1208 | |
1209 | swdesc = stm32_dma3_chan_desc_alloc(chan, count); |
1210 | if (!swdesc) |
1211 | return NULL; |
1212 | |
1213 | if (chan->config_set != STM32_DMA3_CFG_SET_BOTH) |
1214 | stm32_dma3_init_chan_config_for_memcpy(chan, dst, src); |
1215 | |
1216 | for (i = 0, offset = 0; offset < len; i++, offset += next_size) { |
1217 | size_t remaining; |
1218 | int ret; |
1219 | |
1220 | remaining = len - offset; |
1221 | next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE); |
1222 | |
1223 | if (!prevent_refactor && |
1224 | (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst)) |
1225 | next_size = chan->max_burst * (remaining / chan->max_burst); |
1226 | |
1227 | ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2, |
1228 | src + offset, dst + offset, next_size); |
1229 | if (ret) |
1230 | goto err_desc_free; |
1231 | |
1232 | stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src + offset, dst + offset, next_size, |
1233 | ctr1, ctr2, next_size == remaining, false); |
1234 | } |
1235 | |
1236 | /* Enable Errors interrupts */ |
1237 | swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE; |
1238 | /* Enable Transfer state interrupts */ |
1239 | swdesc->ccr |= CCR_TCIE; |
1240 | |
1241 | swdesc->cyclic = false; |
1242 | |
1243 | return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags); |
1244 | |
1245 | err_desc_free: |
1246 | stm32_dma3_chan_desc_free(chan, swdesc); |
1247 | |
1248 | return NULL; |
1249 | } |
1250 | |
1251 | static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan *c, |
1252 | struct scatterlist *sgl, |
1253 | unsigned int sg_len, |
1254 | enum dma_transfer_direction dir, |
1255 | unsigned long flags, void *context) |
1256 | { |
1257 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1258 | struct stm32_dma3_swdesc *swdesc; |
1259 | struct scatterlist *sg; |
1260 | size_t len; |
1261 | dma_addr_t sg_addr, dev_addr, src, dst; |
1262 | u32 i, j, count, ctr1, ctr2; |
1263 | bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) || |
1264 | !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf); |
1265 | int ret; |
1266 | |
1267 | count = 0; |
1268 | for_each_sg(sgl, sg, sg_len, i) |
1269 | count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor); |
1270 | |
1271 | swdesc = stm32_dma3_chan_desc_alloc(chan, count); |
1272 | if (!swdesc) |
1273 | return NULL; |
1274 | |
1275 | /* sg_len and i correspond to the initial sgl; count and j correspond to the hwdesc LL */ |
1276 | j = 0; |
1277 | for_each_sg(sgl, sg, sg_len, i) { |
1278 | sg_addr = sg_dma_address(sg); |
1279 | dev_addr = (dir == DMA_MEM_TO_DEV) ? chan->dma_config.dst_addr : |
1280 | chan->dma_config.src_addr; |
1281 | len = sg_dma_len(sg); |
1282 | |
1283 | do { |
1284 | size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE); |
1285 | |
1286 | if (!prevent_refactor && |
1287 | (chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst)) |
1288 | chunk = chan->max_burst * (len / chan->max_burst); |
1289 | |
1290 | if (dir == DMA_MEM_TO_DEV) { |
1291 | src = sg_addr; |
1292 | dst = dev_addr; |
1293 | |
1294 | ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2, |
1295 | src, dst, chunk); |
1296 | |
1297 | if (FIELD_GET(CTR1_DINC, ctr1)) |
1298 | dev_addr += chunk; |
1299 | } else { /* (dir == DMA_DEV_TO_MEM || dir == DMA_MEM_TO_MEM) */ |
1300 | src = dev_addr; |
1301 | dst = sg_addr; |
1302 | |
1303 | ret = stm32_dma3_chan_prep_hw(chan, dir, &swdesc->ccr, &ctr1, &ctr2, |
1304 | src, dst, chunk); |
1305 | |
1306 | if (FIELD_GET(CTR1_SINC, ctr1)) |
1307 | dev_addr += chunk; |
1308 | } |
1309 | |
1310 | if (ret) |
1311 | goto err_desc_free; |
1312 | |
1313 | stm32_dma3_chan_prep_hwdesc(chan, swdesc, j, src, dst, chunk, |
1314 | ctr1, ctr2, j == (count - 1), false); |
1315 | |
1316 | sg_addr += chunk; |
1317 | len -= chunk; |
1318 | j++; |
1319 | } while (len); |
1320 | } |
1321 | |
1322 | if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL) |
1323 | dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n", |
1324 | count, sg_len); |
1325 | |
1326 | /* Enable Error interrupts */ |
1327 | swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE; |
1328 | /* Enable Transfer state interrupts */ |
1329 | swdesc->ccr |= CCR_TCIE; |
1330 | |
1331 | swdesc->cyclic = false; |
1332 | |
1333 | return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags); |
1334 | |
1335 | err_desc_free: |
1336 | stm32_dma3_chan_desc_free(chan, swdesc); |
1337 | |
1338 | return NULL; |
1339 | } |
1340 | |
1341 | static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_cyclic(struct dma_chan *c, |
1342 | dma_addr_t buf_addr, |
1343 | size_t buf_len, size_t period_len, |
1344 | enum dma_transfer_direction dir, |
1345 | unsigned long flags) |
1346 | { |
1347 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1348 | struct stm32_dma3_swdesc *swdesc; |
1349 | dma_addr_t src, dst; |
1350 | u32 count, i, ctr1, ctr2; |
1351 | int ret; |
1352 | |
1353 | if (!buf_len || !period_len || period_len > STM32_DMA3_MAX_BLOCK_SIZE) { |
1354 | dev_err(chan2dev(chan), "Invalid buffer/period length\n"); |
1355 | return NULL; |
1356 | } |
1357 | |
1358 | if (buf_len % period_len) { |
1359 | dev_err(chan2dev(chan), "Buffer length not multiple of period length\n"); |
1360 | return NULL; |
1361 | } |
1362 | |
1363 | count = buf_len / period_len; |
1364 | swdesc = stm32_dma3_chan_desc_alloc(chan, count); |
1365 | if (!swdesc) |
1366 | return NULL; |
1367 | |
1368 | if (dir == DMA_MEM_TO_DEV) { |
1369 | src = buf_addr; |
1370 | dst = chan->dma_config.dst_addr; |
1371 | |
1372 | ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_DEV, &swdesc->ccr, &ctr1, &ctr2, |
1373 | src, dst, period_len); |
1374 | } else if (dir == DMA_DEV_TO_MEM) { |
1375 | src = chan->dma_config.src_addr; |
1376 | dst = buf_addr; |
1377 | |
1378 | ret = stm32_dma3_chan_prep_hw(chan, DMA_DEV_TO_MEM, &swdesc->ccr, &ctr1, &ctr2, |
1379 | src, dst, period_len); |
1380 | } else { |
1381 | dev_err(chan2dev(chan), "Invalid direction\n"); |
1382 | ret = -EINVAL; |
1383 | } |
1384 | |
1385 | if (ret) |
1386 | goto err_desc_free; |
1387 | |
1388 | for (i = 0; i < count; i++) { |
1389 | if (dir == DMA_MEM_TO_DEV) { |
1390 | src = buf_addr + i * period_len; |
1391 | dst = chan->dma_config.dst_addr; |
1392 | } else { /* (dir == DMA_DEV_TO_MEM) */ |
1393 | src = chan->dma_config.src_addr; |
1394 | dst = buf_addr + i * period_len; |
1395 | } |
1396 | |
1397 | stm32_dma3_chan_prep_hwdesc(chan, swdesc, i, src, dst, period_len, |
1398 | ctr1, ctr2, i == (count - 1), true); |
1399 | } |
1400 | |
1401 | /* Enable Error interrupts */ |
1402 | swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE; |
1403 | /* Enable Transfer state interrupts */ |
1404 | swdesc->ccr |= CCR_TCIE; |
1405 | |
1406 | swdesc->cyclic = true; |
1407 | |
1408 | return vchan_tx_prep(&chan->vchan, &swdesc->vdesc, flags); |
1409 | |
1410 | err_desc_free: |
1411 | stm32_dma3_chan_desc_free(chan, swdesc); |
1412 | |
1413 | return NULL; |
1414 | } |
1415 | |
1416 | static void stm32_dma3_caps(struct dma_chan *c, struct dma_slave_caps *caps) |
1417 | { |
1418 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1419 | |
1420 | if (!chan->fifo_size) { |
1421 | caps->max_burst = 0; |
1422 | caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1423 | caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1424 | } else { |
1425 | /* Burst transfer should not exceed half of the fifo size */ |
1426 | caps->max_burst = chan->max_burst; |
1427 | if (caps->max_burst < DMA_SLAVE_BUSWIDTH_8_BYTES) { |
1428 | caps->src_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1429 | caps->dst_addr_widths &= ~BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1430 | } |
1431 | } |
1432 | } |
1433 | |
1434 | static int stm32_dma3_config(struct dma_chan *c, struct dma_slave_config *config) |
1435 | { |
1436 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1437 | |
1438 | memcpy(&chan->dma_config, config, sizeof(*config)); |
1439 | chan->config_set |= STM32_DMA3_CFG_SET_DMA; |
1440 | |
1441 | return 0; |
1442 | } |
1443 | |
1444 | static int stm32_dma3_pause(struct dma_chan *c) |
1445 | { |
1446 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1447 | int ret; |
1448 | |
1449 | ret = stm32_dma3_chan_suspend(chan, true); |
1450 | if (ret) |
1451 | return ret; |
1452 | |
1453 | chan->dma_status = DMA_PAUSED; |
1454 | |
1455 | dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); |
1456 | |
1457 | return 0; |
1458 | } |
1459 | |
1460 | static int stm32_dma3_resume(struct dma_chan *c) |
1461 | { |
1462 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1463 | |
1464 | stm32_dma3_chan_suspend(chan, false); |
1465 | |
1466 | chan->dma_status = DMA_IN_PROGRESS; |
1467 | |
1468 | dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); |
1469 | |
1470 | return 0; |
1471 | } |
1472 | |
1473 | static int stm32_dma3_terminate_all(struct dma_chan *c) |
1474 | { |
1475 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1476 | unsigned long flags; |
1477 | LIST_HEAD(head); |
1478 | |
1479 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1480 | |
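/* Detach the descriptor under transfer; virt-dma frees it once the channel is synchronized */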
1481 | if (chan->swdesc) { |
1482 | vchan_terminate_vdesc(&chan->swdesc->vdesc); |
1483 | chan->swdesc = NULL; |
1484 | } |
1485 | |
1486 | stm32_dma3_chan_stop(chan); |
1487 | |
1488 | vchan_get_all_descriptors(&chan->vchan, &head); |
1489 |
1490 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1491 | vchan_dma_desc_free_list(&chan->vchan, &head); |
1492 | |
1493 | dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan); |
1494 | |
1495 | return 0; |
1496 | } |
1497 | |
1498 | static void stm32_dma3_synchronize(struct dma_chan *c) |
1499 | { |
1500 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1501 | |
1502 | vchan_synchronize(&chan->vchan); |
1503 | } |
1504 | |
1505 | static enum dma_status stm32_dma3_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
1506 | struct dma_tx_state *txstate) |
1507 | { |
1508 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1509 | struct stm32_dma3_swdesc *swdesc = NULL; |
1510 | enum dma_status status; |
1511 | unsigned long flags; |
1512 | struct virt_dma_desc *vd; |
1513 | |
1514 | status = dma_cookie_status(c, cookie, txstate); |
1515 | if (status == DMA_COMPLETE) |
1516 | return status; |
1517 | |
1518 | if (!txstate) |
1519 | return chan->dma_status; |
1520 | |
1521 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1522 | |
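/* The cookie may match a descriptor still in the virt-dma lists, or the one currently on the channel */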
1523 | vd = vchan_find_desc(&chan->vchan, cookie); |
1524 | if (vd) |
1525 | swdesc = to_stm32_dma3_swdesc(vd); |
1526 | else if (chan->swdesc && chan->swdesc->vdesc.tx.cookie == cookie) |
1527 | swdesc = chan->swdesc; |
1528 | |
1529 | /* Get residue/in_flight_bytes only if a transfer is currently running (swdesc != NULL) */ |
1530 | if (swdesc) |
1531 | stm32_dma3_chan_set_residue(chan, swdesc, txstate); |
1532 | |
1533 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1534 | |
1535 | return chan->dma_status; |
1536 | } |
1537 | |
1538 | static void stm32_dma3_issue_pending(struct dma_chan *c) |
1539 | { |
1540 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1541 | unsigned long flags; |
1542 | |
1543 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1544 | |
1545 | if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) { |
1546 | dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); |
1547 | stm32_dma3_chan_start(chan); |
1548 | } |
1549 | |
1550 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1551 | } |
1552 | |
1553 | static bool stm32_dma3_filter_fn(struct dma_chan *c, void *fn_param) |
1554 | { |
1555 | struct stm32_dma3_chan *chan = to_stm32_dma3_chan(c); |
1556 | struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan); |
1557 | struct stm32_dma3_dt_conf *conf = fn_param; |
1558 | u32 mask, semcr; |
1559 | int ret; |
1560 | |
1561 | dev_dbg(c->device->dev, "%s(%s): req_line=%d ch_conf=%08x tr_conf=%08x\n", |
1562 | __func__, dma_chan_name(c), conf->req_line, conf->ch_conf, conf->tr_conf); |
1563 | |
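/* If the controller node has a dma-channel-mask property, only channels set in the mask may be picked */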
1564 | if (!of_property_read_u32(c->device->dev->of_node, "dma-channel-mask", &mask)) |
1565 | if (!(mask & BIT(chan->id))) |
1566 | return false; |
1567 | |
1568 | ret = pm_runtime_resume_and_get(ddata->dma_dev.dev); |
1569 | if (ret < 0) |
1570 | return false; |
1571 | semcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)); |
1572 | pm_runtime_put_sync(ddata->dma_dev.dev); |
1573 | |
1574 | /* Check if chan is free */ |
1575 | if (semcr & CSEMCR_SEM_MUTEX) |
1576 | return false; |
1577 | |
1578 | /* Check that the channel FIFO size matches the requested one */ |
1579 | if (FIELD_GET(STM32_DMA3_DT_FIFO, conf->ch_conf) != chan->fifo_size) |
1580 | return false; |
1581 | |
1582 | return true; |
1583 | } |
1584 | |
1585 | static struct dma_chan *stm32_dma3_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) |
1586 | { |
1587 | struct stm32_dma3_ddata *ddata = ofdma->of_dma_data; |
1588 | dma_cap_mask_t mask = ddata->dma_dev.cap_mask; |
1589 | struct stm32_dma3_dt_conf conf; |
1590 | struct stm32_dma3_chan *chan; |
1591 | struct dma_chan *c; |
1592 | |
1593 | if (dma_spec->args_count < 3) { |
1594 | dev_err(ddata->dma_dev.dev, "Invalid args count\n"); |
1595 | return NULL; |
1596 | } |
1597 | |
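/* The DMA cells carry the request line, the channel configuration and the transfer configuration */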
1598 | conf.req_line = dma_spec->args[0]; |
1599 | conf.ch_conf = dma_spec->args[1]; |
1600 | conf.tr_conf = dma_spec->args[2]; |
1601 | |
1602 | if (conf.req_line >= ddata->dma_requests) { |
1603 | dev_err(ddata->dma_dev.dev, "Invalid request line\n"); |
1604 | return NULL; |
1605 | } |
1606 | |
1607 | /* Request a DMA channel from the generic DMA controller list */ |
1608 | c = dma_request_channel(mask, stm32_dma3_filter_fn, &conf); |
1609 | if (!c) { |
1610 | dev_err(ddata->dma_dev.dev, "No suitable channel found\n"); |
1611 | return NULL; |
1612 | } |
1613 | |
1614 | chan = to_stm32_dma3_chan(c); |
1615 | chan->dt_config = conf; |
1616 | chan->config_set |= STM32_DMA3_CFG_SET_DT; |
1617 | |
1618 | return c; |
1619 | } |
1620 | |
1621 | static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata) |
1622 | { |
1623 | u32 chan_reserved, mask = 0, i, ccidcfgr, invalid_cid = 0; |
1624 | |
1625 | /* Reserve Secure channels */ |
1626 | chan_reserved = readl_relaxed(ddata->base + STM32_DMA3_SECCFGR); |
1627 | |
1628 | /* |
1629 | * CID filtering must be configured to ensure that the DMA3 channel will inherit the CID of |
1630 | * the processor which is configuring and using the given channel. |
1631 | * In case CID filtering is not configured, dma-channel-mask property can be used to |
1632 | * specify available DMA channels to the kernel. |
1633 | */ |
1634 | of_property_read_u32(ddata->dma_dev.dev->of_node, "dma-channel-mask", &mask); |
1635 | |
1636 | /* Reserve: !CID-filtered channels not in dma-channel-mask, static CID != CID1, or CID1 not allowed */ |
1637 | for (i = 0; i < ddata->dma_channels; i++) { |
1638 | ccidcfgr = readl_relaxed(ddata->base + STM32_DMA3_CCIDCFGR(i)); |
1639 | |
1640 | if (!(ccidcfgr & CCIDCFGR_CFEN)) { /* !CID-filtered */ |
1641 | invalid_cid |= BIT(i); |
1642 | if (!(mask & BIT(i))) /* Not in dma-channel-mask */ |
1643 | chan_reserved |= BIT(i); |
1644 | } else { /* CID-filtered */ |
1645 | if (!(ccidcfgr & CCIDCFGR_SEM_EN)) { /* Static CID mode */ |
1646 | if (FIELD_GET(CCIDCFGR_SCID, ccidcfgr) != CCIDCFGR_CID1) |
1647 | chan_reserved |= BIT(i); |
1648 | } else { /* Semaphore mode */ |
1649 | if (!FIELD_GET(CCIDCFGR_SEM_WLIST_CID1, ccidcfgr)) |
1650 | chan_reserved |= BIT(i); |
1651 | ddata->chans[i].semaphore_mode = true; |
1652 | } |
1653 | } |
1654 | dev_dbg(ddata->dma_dev.dev, "chan%d: %s mode, %s\n", i, |
1655 | !(ccidcfgr & CCIDCFGR_CFEN) ? "!CID-filtered": |
1656 | ddata->chans[i].semaphore_mode ? "Semaphore": "Static CID", |
1657 | (chan_reserved & BIT(i)) ? "denied": |
1658 | mask & BIT(i) ? "force allowed": "allowed"); |
1659 | } |
1660 | |
1661 | if (invalid_cid) |
1662 | dev_warn(ddata->dma_dev.dev, "chan%*pbl have invalid CID configuration\n", |
1663 | ddata->dma_channels, &invalid_cid); |
1664 | |
1665 | return chan_reserved; |
1666 | } |
1667 | |
1668 | static struct stm32_dma3_pdata stm32mp25_pdata = { |
1669 | .axi_max_burst_len = 16, |
1670 | }; |
1671 | |
1672 | static const struct of_device_id stm32_dma3_of_match[] = { |
1673 | { .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, }, |
1674 | { /* sentinel */ }, |
1675 | }; |
1676 | MODULE_DEVICE_TABLE(of, stm32_dma3_of_match); |
1677 | |
1678 | static int stm32_dma3_probe(struct platform_device *pdev) |
1679 | { |
1680 | struct device_node *np = pdev->dev.of_node; |
1681 | const struct stm32_dma3_pdata *pdata; |
1682 | struct stm32_dma3_ddata *ddata; |
1683 | struct reset_control *reset; |
1684 | struct stm32_dma3_chan *chan; |
1685 | struct dma_device *dma_dev; |
1686 | u32 master_ports, chan_reserved, i, verr; |
1687 | u64 hwcfgr; |
1688 | int ret; |
1689 | |
1690 | ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); |
1691 | if (!ddata) |
1692 | return -ENOMEM; |
1693 | platform_set_drvdata(pdev, ddata); |
1694 | |
1695 | dma_dev = &ddata->dma_dev; |
1696 | |
1697 | ddata->base = devm_platform_ioremap_resource(pdev, 0); |
1698 | if (IS_ERR(ddata->base)) |
1699 | return PTR_ERR(ddata->base); |
1700 | |
1701 | ddata->clk = devm_clk_get(&pdev->dev, NULL); |
1702 | if (IS_ERR(ddata->clk)) |
1703 | return dev_err_probe(&pdev->dev, PTR_ERR(ddata->clk), "Failed to get clk\n"); |
1704 | |
1705 | reset = devm_reset_control_get_optional(&pdev->dev, NULL); |
1706 | if (IS_ERR(reset)) |
1707 | return dev_err_probe(&pdev->dev, PTR_ERR(reset), "Failed to get reset\n"); |
1708 | |
1709 | ret = clk_prepare_enable(ddata->clk); |
1710 | if (ret) |
1711 | return dev_err_probe(&pdev->dev, ret, "Failed to enable clk\n"); |
1712 |
1713 | reset_control_reset(reset); |
1714 |
1715 | INIT_LIST_HEAD(&dma_dev->channels); |
1716 | |
1717 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); |
1718 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
1719 | dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask); |
1720 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); |
1721 | dma_dev->dev = &pdev->dev; |
1722 | /* |
1723 | * This controller supports up to 8-byte buswidth depending on the port used and the |
1724 | * channel, and can only access addresses at even boundaries, multiples of the buswidth. |
1725 | */ |
1726 | dma_dev->copy_align = DMAENGINE_ALIGN_8_BYTES; |
1727 | dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
1728 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | |
1729 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | |
1730 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1731 | dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
1732 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | |
1733 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | |
1734 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); |
1735 | dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM); |
1736 | |
1737 | dma_dev->descriptor_reuse = true; |
1738 | dma_dev->max_sg_burst = STM32_DMA3_MAX_SEG_SIZE; |
1739 | dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1740 | dma_dev->device_alloc_chan_resources = stm32_dma3_alloc_chan_resources; |
1741 | dma_dev->device_free_chan_resources = stm32_dma3_free_chan_resources; |
1742 | dma_dev->device_prep_dma_memcpy = stm32_dma3_prep_dma_memcpy; |
1743 | dma_dev->device_prep_slave_sg = stm32_dma3_prep_slave_sg; |
1744 | dma_dev->device_prep_dma_cyclic = stm32_dma3_prep_dma_cyclic; |
1745 | dma_dev->device_caps = stm32_dma3_caps; |
1746 | dma_dev->device_config = stm32_dma3_config; |
1747 | dma_dev->device_pause = stm32_dma3_pause; |
1748 | dma_dev->device_resume = stm32_dma3_resume; |
1749 | dma_dev->device_terminate_all = stm32_dma3_terminate_all; |
1750 | dma_dev->device_synchronize = stm32_dma3_synchronize; |
1751 | dma_dev->device_tx_status = stm32_dma3_tx_status; |
1752 | dma_dev->device_issue_pending = stm32_dma3_issue_pending; |
1753 | |
1754 | /* If dma-channels is not set in the device tree, get it from HWCFGR1 */ |
1755 | if (of_property_read_u32(np, "dma-channels", &ddata->dma_channels)) { |
1756 | hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1); |
1757 | ddata->dma_channels = FIELD_GET(G_NUM_CHANNELS, hwcfgr); |
1758 | } |
1759 | |
1760 | /* If dma-requests is not set in the device tree, get it from HWCFGR2 */ |
1761 | if (of_property_read_u32(np, "dma-requests", &ddata->dma_requests)) { |
1762 | hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR2); |
1763 | ddata->dma_requests = FIELD_GET(G_MAX_REQ_ID, hwcfgr) + 1; |
1764 | } |
1765 | |
1766 | /* G_MASTER_PORTS, G_M0_DATA_WIDTH_ENC, G_M1_DATA_WIDTH_ENC in HWCFGR1 */ |
1767 | hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1); |
1768 | master_ports = FIELD_GET(G_MASTER_PORTS, hwcfgr); |
1769 | |
1770 | ddata->ports_max_dw[0] = FIELD_GET(G_M0_DATA_WIDTH_ENC, hwcfgr); |
1771 | if (master_ports == AXI64 || master_ports == AHB32) /* Single master port */ |
1772 | ddata->ports_max_dw[1] = DW_INVALID; |
1773 | else /* Dual master ports */ |
1774 | ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr); |
1775 | |
1776 | /* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */ |
1777 | ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN; |
1778 | pdata = device_get_match_data(&pdev->dev); |
1779 | if (pdata && pdata->axi_max_burst_len) { |
1780 | ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len, |
1781 | STM32_DMA3_MAX_BURST_LEN); |
1782 | dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n", |
1783 | ddata->axi_max_burst_len); |
1784 | } |
1785 | |
1786 | ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans), |
1787 | GFP_KERNEL); |
1788 | if (!ddata->chans) { |
1789 | ret = -ENOMEM; |
1790 | goto err_clk_disable; |
1791 | } |
1792 | |
1793 | chan_reserved = stm32_dma3_check_rif(ddata); |
1794 | |
1795 | if (chan_reserved == GENMASK(ddata->dma_channels - 1, 0)) { |
1796 | ret = -ENODEV; |
1797 | dev_err_probe(&pdev->dev, ret, "No channel available, abort registration\n"); |
1798 | goto err_clk_disable; |
1799 | } |
1800 | |
1801 | /* G_FIFO_SIZE x=0..7 in HWCFGR3 and G_FIFO_SIZE x=8..15 in HWCFGR4 */ |
1802 | hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR3); |
1803 | hwcfgr |= ((u64)readl_relaxed(ddata->base + STM32_DMA3_HWCFGR4)) << 32; |
1804 | |
1805 | for (i = 0; i < ddata->dma_channels; i++) { |
1806 | if (chan_reserved & BIT(i)) |
1807 | continue; |
1808 | |
1809 | chan = &ddata->chans[i]; |
1810 | chan->id = i; |
1811 | chan->fifo_size = get_chan_hwcfg(i, G_FIFO_SIZE(i), hwcfgr); |
1812 | /* Max burst is half of the FIFO size when the channel has a FIFO, no burst otherwise */ |
1813 | chan->max_burst = (chan->fifo_size) ? (1 << (chan->fifo_size + 1)) / 2 : 0; |
1814 | } |
1815 | |
1816 | ret = dmaenginem_async_device_register(dma_dev); |
1817 | if (ret) |
1818 | goto err_clk_disable; |
1819 | |
1820 | for (i = 0; i < ddata->dma_channels; i++) { |
1821 | char name[12]; |
1822 | |
1823 | if (chan_reserved & BIT(i)) |
1824 | continue; |
1825 | |
1826 | chan = &ddata->chans[i]; |
1827 | snprintf(name, sizeof(name), "dma%dchan%d", ddata->dma_dev.dev_id, chan->id); |
1828 | |
1829 | chan->vchan.desc_free = stm32_dma3_chan_vdesc_free; |
1830 | vchan_init(&chan->vchan, dma_dev); |
1831 | |
1832 | ret = dma_async_device_channel_register(&ddata->dma_dev, &chan->vchan.chan, name); |
1833 | if (ret) { |
1834 | dev_err_probe(&pdev->dev, ret, "Failed to register channel %s\n", name); |
1835 | goto err_clk_disable; |
1836 | } |
1837 | |
1838 | ret = platform_get_irq(pdev, i); |
1839 | if (ret < 0) |
1840 | goto err_clk_disable; |
1841 | chan->irq = ret; |
1842 | |
1843 | ret = devm_request_irq(&pdev->dev, chan->irq, stm32_dma3_chan_irq, 0, |
1844 | dev_name(chan2dev(chan)), chan); |
1845 | if (ret) { |
1846 | dev_err_probe(&pdev->dev, ret, "Failed to request channel %s IRQ\n", |
1847 | dev_name(chan2dev(chan))); |
1848 | goto err_clk_disable; |
1849 | } |
1850 | } |
1851 | |
1852 | ret = of_dma_controller_register(np, stm32_dma3_of_xlate, ddata); |
1853 | if (ret) { |
1854 | dev_err_probe(&pdev->dev, ret, "Failed to register controller\n"); |
1855 | goto err_clk_disable; |
1856 | } |
1857 | |
1858 | verr = readl_relaxed(ddata->base + STM32_DMA3_VERR); |
1859 | |
1860 | pm_runtime_set_active(&pdev->dev); |
1861 | pm_runtime_enable(&pdev->dev); |
1862 | pm_runtime_get_noresume(&pdev->dev); |
1863 | pm_runtime_put(&pdev->dev); |
1864 | |
1865 | dev_info(&pdev->dev, "STM32 DMA3 registered rev:%lu.%lu\n", |
1866 | FIELD_GET(VERR_MAJREV, verr), FIELD_GET(VERR_MINREV, verr)); |
1867 | |
1868 | return 0; |
1869 | |
1870 | err_clk_disable: |
1871 | clk_disable_unprepare(ddata->clk); |
1872 | |
1873 | return ret; |
1874 | } |
1875 | |
1876 | static void stm32_dma3_remove(struct platform_device *pdev) |
1877 | { |
1878 | pm_runtime_disable(&pdev->dev); |
1879 | } |
1880 | |
1881 | static int stm32_dma3_runtime_suspend(struct device *dev) |
1882 | { |
1883 | struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev); |
1884 | |
1885 | clk_disable_unprepare(ddata->clk); |
1886 | |
1887 | return 0; |
1888 | } |
1889 | |
1890 | static int stm32_dma3_runtime_resume(struct device *dev) |
1891 | { |
1892 | struct stm32_dma3_ddata *ddata = dev_get_drvdata(dev); |
1893 | int ret; |
1894 | |
1895 | ret = clk_prepare_enable(ddata->clk); |
1896 | if (ret) |
1897 | dev_err(dev, "Failed to enable clk: %d\n", ret); |
1898 | |
1899 | return ret; |
1900 | } |
1901 | |
1902 | static const struct dev_pm_ops stm32_dma3_pm_ops = { |
1903 | SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) |
1904 | RUNTIME_PM_OPS(stm32_dma3_runtime_suspend, stm32_dma3_runtime_resume, NULL) |
1905 | }; |
1906 | |
1907 | static struct platform_driver stm32_dma3_driver = { |
1908 | .probe = stm32_dma3_probe, |
1909 | .remove = stm32_dma3_remove, |
1910 | .driver = { |
1911 | .name = "stm32-dma3", |
1912 | .of_match_table = stm32_dma3_of_match, |
1913 | .pm = pm_ptr(&stm32_dma3_pm_ops), |
1914 | }, |
1915 | }; |
1916 | |
1917 | static int __init stm32_dma3_init(void) |
1918 | { |
1919 | return platform_driver_register(&stm32_dma3_driver); |
1920 | } |
1921 | |
1922 | subsys_initcall(stm32_dma3_init); |
1923 | |
1924 | MODULE_DESCRIPTION("STM32 DMA3 controller driver"); |
1925 | MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@foss.st.com>"); |
1926 | MODULE_LICENSE("GPL"); |
1927 |