// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/dma/imx-dma.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

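/* The controller provides two shared 2D configuration register sets (A/B). */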
#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			request;
};

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

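/*
 * Hardware chaining lets the controller reload the next scatterlist entry
 * by itself (CCR_RPT/CCR_ACRPT); it is only usable on i.MX27.
 */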
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

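/*
 * Program the descriptor's current chunk into the channel, unmask its
 * interrupt and set CCR_CEN to start the transfer. With hardware chaining
 * (non-i.MX1, sg transfers) the following sg entry is pre-loaded with
 * CCR_RPT | CCR_ACRPT so the controller picks it up automatically.
 */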
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

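/*
 * Stop the channel: mask its interrupt, clear CCR_CEN and acknowledge any
 * pending interrupt. The chaining watchdog is cancelled if it was armed.
 */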
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

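/*
 * Watchdog for hardware-chained transfers: if the channel does not finish
 * in time, shut it down and let the tasklet report the error.
 */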
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

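/*
 * Error interrupt handler: gather burst/request/transfer/buffer error
 * status for all channels, acknowledge it and schedule the per-channel
 * tasklets for error handling.
 */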
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

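/*
 * Per-channel completion handling: advance to the next sg entry if there
 * is one (restarting or chain-reloading the channel), otherwise stop the
 * channel and let the tasklet complete the descriptor.
 */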
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

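/*
 * Program one descriptor into the hardware and start it. Interleaved
 * transfers first claim one of the two shared 2D slots (A or B) and are
 * then handled like a plain memcpy with the 2D modes set in CCR.
 */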
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

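/*
 * Completion tasklet: complete the finished descriptor (cyclic descriptors
 * stay on ld_active), release a claimed 2D slot, kick off the next queued
 * descriptor and invoke the client callback.
 */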
static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

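/*
 * Apply the channel's dma_slave_config for one direction: program the
 * request line (RSSR) and burst length (BLR), and precompute the CCR
 * values used for dev-to-mem and mem-to-dev transfers.
 */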
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

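/*
 * tx_submit: move the prepared descriptor from the free list onto the
 * queue and assign its cookie; it is started later by issue_pending.
 */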
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

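/*
 * Build a cyclic transfer as a circular scatterlist: one entry per period,
 * chained back onto itself, so completion interrupts keep advancing
 * through the buffer indefinitely (desc->len = IMX_DMA_LENGTH_LOOP).
 */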
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
				   sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

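/*
 * Interleaved (2D) transfer preparation: only single-frame MEM_TO_MEM
 * templates are accepted. x/y/w describe the chunk size, number of lines
 * and line stride that imxdma_xfer_desc programs into a 2D slot.
 */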
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

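/*
 * Start the first queued descriptor if the channel is currently idle.
 */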
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

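/*
 * Device tree translation: #dma-cells is 1 and carries the request line,
 * which imxdma_filter_fn stores in the matched channel.
 */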
static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	imxdma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static void imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove_new	= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");
1251 | |