// SPDX-License-Identifier: GPL-2.0
/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 */

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

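/* Allocate and initialize one hardware descriptor from the channel's DMA pool */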
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

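/* Return a descriptor and all of its children to the DMA pool */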
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

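/* Start the first descriptor waiting on the queue; called with dwc->lock held */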
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	/* CTL_LO[6:4] holds SRC_TR_WIDTH, needed to convert blocks to bytes */
	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}

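/*
 * Walk the active list: update residues, hand completed descriptors back to
 * their owners and restart the channel if more work is queued.
 */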
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       " cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

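/* Tasklet: bottom half that services per-channel transfer-complete and error status */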
static void dw_dma_tasklet(struct tasklet_struct *t)
{
	struct dw_dma *dw = from_tasklet(dw, t, tasklet);
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

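/* Prepare a memory-to-memory copy, splitting it into hardware-block-sized LLI entries */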
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	u8 m_master = dwc->dws.m_master;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	u32 ctllo, ctlhi;
	u8 lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
		 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

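/* Prepare a slave (peripheral) transfer described by a scatterlist */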
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo, ctlhi;
	u8 m_master = dwc->dws.m_master;
	u8 lms = DWC_LLP_LMS(m_master);
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int data_width = dw->pdata->data_width[m_master];
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;
			size_t dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(data_width | mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

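/* dma_filter_fn: match a channel on this controller against a dw_dma_slave descriptor */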
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* permit channels in accordance with the channels mask */
	if (dws->channels && !(dws->channels & dwc->mask))
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	dwc->dma_sconfig.src_maxburst =
		clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
	dwc->dma_sconfig.dst_maxburst =
		clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);

	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);

	return 0;
}

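/* Suspend the channel and, if draining, poll until its FIFO reports empty */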
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

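/* Look up an active descriptor by cookie; called with dwc->lock held */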
static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
				      enum dma_status *status)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
			if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
				*status = DMA_PAUSED;
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

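/* Disable the controller and mask all channel interrupt sources */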
void do_dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
		dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	caps->max_burst = dwc->max_burst;

	/*
	 * It might be crucial for some devices to have the hardware
	 * accelerated multi-block transfers supported, aka LLPs in DW DMAC
	 * notation. So if LLPs are supported then max_sg_burst is set to
	 * zero which means unlimited number of SG entries can be handled in a
	 * single DMA transaction, otherwise it's just one SG entry.
	 */
	if (dwc->nollp)
		caps->max_sg_burst = 1;
	else
		caps->max_sg_burst = 0;
}

int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool autocfg = false;
	unsigned int dw_params;
	unsigned int i;
	int err;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_setup(&dw->tasklet, dw_dma_tasklet);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
				dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;

			/*
			 * According to the DW DMA databook the true scatter-
			 * gather LLPs aren't available if either multi-block
			 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
			 * LLP register is hard-coded to zeros
			 * (CHx_HC_LLP == 1).
			 */
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
				(dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
			dwc->max_burst =
				(0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
			dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_caps = dwc_caps;
	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.min_burst = DW_DMA_MIN_BURST;
	dw->dma.max_burst = DW_DMA_MAX_BURST;
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * For now there is no hardware with non uniform maximum block size
	 * across all of the device channels, so we set the maximum segment
	 * size as the block size found for the very first channel.
	 */
	dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}

int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}

int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->disable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->enable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");