1// SPDX-License-Identifier: GPL-2.0
2// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
3
4/*
5 * Synopsys DesignWare AXI DMA Controller driver.
6 *
7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
8 */
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/device.h>
13#include <linux/dmaengine.h>
14#include <linux/dmapool.h>
15#include <linux/dma-mapping.h>
16#include <linux/err.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/iopoll.h>
20#include <linux/io-64-nonatomic-lo-hi.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_dma.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/property.h>
28#include <linux/reset.h>
29#include <linux/slab.h>
30#include <linux/types.h>
31
32#include "dw-axi-dmac.h"
33#include "../dmaengine.h"
34#include "../virt-dma.h"
35
36/*
37 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
38 * master data bus width up to 512 bits (for both AXI master interfaces), but
39 * it depends on IP block configuration.
40 */
41#define AXI_DMA_BUSWIDTHS \
42 (DMA_SLAVE_BUSWIDTH_1_BYTE | \
43 DMA_SLAVE_BUSWIDTH_2_BYTES | \
44 DMA_SLAVE_BUSWIDTH_4_BYTES | \
45 DMA_SLAVE_BUSWIDTH_8_BYTES | \
46 DMA_SLAVE_BUSWIDTH_16_BYTES | \
47 DMA_SLAVE_BUSWIDTH_32_BYTES | \
48 DMA_SLAVE_BUSWIDTH_64_BYTES)
49
50#define AXI_DMA_FLAG_HAS_APB_REGS BIT(0)
51#define AXI_DMA_FLAG_HAS_RESETS BIT(1)
52#define AXI_DMA_FLAG_USE_CFG2 BIT(2)
53
54static inline void
55axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
56{
57 iowrite32(val, chip->regs + reg);
58}
59
60static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
61{
62 return ioread32(chip->regs + reg);
63}
64
static inline void
axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
{
	iowrite64(val, chip->regs + reg);
}

static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
{
	return ioread64(chip->regs + reg);
}
75
76static inline void
77axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
78{
79 iowrite32(val, chan->chan_regs + reg);
80}
81
82static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
83{
84 return ioread32(chan->chan_regs + reg);
85}
86
87static inline void
88axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
89{
	/*
	 * Split the 64-bit write into two 32-bit writes, as some hardware
	 * configurations don't support 64-bit register access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
96}
97
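/*
 * Program CH_CFG_L/CH_CFG_H for a channel. On controllers using the legacy
 * 8-channel register map (and not forced to CFG2), the handshake interface
 * numbers, handshake select and priority live in CH_CFG_H; on newer register
 * maps the SRC_PER/DST_PER fields move into CH_CFG_L and the remaining
 * fields use the CFG2 bit positions.
 */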
98static inline void axi_chan_config_write(struct axi_dma_chan *chan,
99 struct axi_dma_chan_config *config)
100{
101 u32 cfg_lo, cfg_hi;
102
103 cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
104 config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
105 if (chan->chip->dw->hdata->reg_map_8_channels &&
106 !chan->chip->dw->hdata->use_cfg2) {
107 cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
108 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
109 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
110 config->src_per << CH_CFG_H_SRC_PER_POS |
111 config->dst_per << CH_CFG_H_DST_PER_POS |
112 config->prior << CH_CFG_H_PRIORITY_POS;
113 } else {
114 cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
115 config->dst_per << CH_CFG2_L_DST_PER_POS;
116 cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
117 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
118 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
119 config->prior << CH_CFG2_H_PRIORITY_POS;
120 }
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
123}
124
125static inline void axi_dma_disable(struct axi_dma_chip *chip)
126{
127 u32 val;
128
129 val = axi_dma_ioread32(chip, DMAC_CFG);
130 val &= ~DMAC_EN_MASK;
131 axi_dma_iowrite32(chip, DMAC_CFG, val);
132}
133
134static inline void axi_dma_enable(struct axi_dma_chip *chip)
135{
136 u32 val;
137
138 val = axi_dma_ioread32(chip, DMAC_CFG);
139 val |= DMAC_EN_MASK;
140 axi_dma_iowrite32(chip, DMAC_CFG, val);
141}
142
143static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
144{
145 u32 val;
146
147 val = axi_dma_ioread32(chip, DMAC_CFG);
148 val &= ~INT_EN_MASK;
149 axi_dma_iowrite32(chip, DMAC_CFG, val);
150}
151
152static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
153{
154 u32 val;
155
156 val = axi_dma_ioread32(chip, DMAC_CFG);
157 val |= INT_EN_MASK;
158 axi_dma_iowrite32(chip, DMAC_CFG, val);
159}
160
161static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
162{
163 u32 val;
164
165 if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
167 } else {
168 val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
169 val &= ~irq_mask;
170 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
171 }
172}
173
static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}
188
189static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
190{
191 return axi_chan_ioread32(chan, CH_INTSTATUS);
192}
193
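/*
 * DMAC_CHEN pairs every channel-enable bit with a write-enable bit: a write
 * only updates the enable bits whose corresponding WE bits are also set, so
 * one register write can (un)gate a single channel without a read-modify-write
 * race against the other channels. Controllers with more than 16 channels
 * keep a second block of enable/WE bits shifted by DMAC_CHAN_BLOCK_SHIFT and
 * accessed through a 64-bit register.
 */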
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}
220
static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}
249
250static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
251{
252 u64 val;
253
	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
258
259 if (chan->id >= DMAC_CHAN_16)
260 return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
261 else
262 return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
263}
264
265static void axi_dma_hw_init(struct axi_dma_chip *chip)
266{
267 int ret;
268 u32 i;
269
	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
277}
278
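/*
 * The transfer width is derived from the least aligned of the source address,
 * destination address and length, capped at the configured master data bus
 * width. For example (hypothetical values): src, dst and len all being
 * multiples of 8 gives __ffs(... | BIT(max_width)) >= 3, i.e. 64-bit beats,
 * while an odd length forces byte-wide transfers.
 */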
279static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
280 dma_addr_t dst, size_t len)
281{
282 u32 max_width = chan->chip->dw->hdata->m_data_width;
283
284 return __ffs(src | dst | len | BIT(max_width));
285}
286
static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}
291
292static struct axi_dma_desc *axi_desc_alloc(u32 num)
293{
294 struct axi_dma_desc *desc;
295
296 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
297 if (!desc)
298 return NULL;
299
300 desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
301 if (!desc->hw_desc) {
		kfree(desc);
303 return NULL;
304 }
305 desc->nr_hw_descs = num;
306
307 return desc;
308}
309
310static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
311 dma_addr_t *addr)
312{
313 struct axi_dma_lli *lli;
314 dma_addr_t phys;
315
	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
317 if (unlikely(!lli)) {
318 dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
319 axi_chan_name(chan));
320 return NULL;
321 }
322
	atomic_inc(&chan->descs_allocated);
324 *addr = phys;
325
326 return lli;
327}
328
329static void axi_desc_put(struct axi_dma_desc *desc)
330{
331 struct axi_dma_chan *chan = desc->chan;
332 int count = desc->nr_hw_descs;
333 struct axi_dma_hw_desc *hw_desc;
334 int descs_put;
335
336 for (descs_put = 0; descs_put < count; descs_put++) {
337 hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
344 dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
345 axi_chan_name(chan), descs_put,
346 atomic_read(&chan->descs_allocated));
347}
348
static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}
353
354static enum dma_status
355dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
356 struct dma_tx_state *txstate)
357{
358 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
359 struct virt_dma_desc *vdesc;
360 enum dma_status status;
361 u32 completed_length;
362 unsigned long flags;
363 u32 completed_blocks;
364 size_t bytes = 0;
365 u32 length;
366 u32 len;
367
	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);
385
386 return status;
387}
388
389static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
390{
391 desc->lli->llp = cpu_to_le64(adr);
392}
393
394static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
395{
	axi_chan_iowrite64(chan, CH_LLP, adr);
397}
398
399static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
400{
401 u32 offset = DMAC_APB_BYTE_WR_CH_EN;
402 u32 reg_width, val;
403
404 if (!chan->chip->apb_regs) {
405 dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
406 return;
407 }
408
409 reg_width = __ffs(chan->config.dst_addr_width);
410 if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
411 offset = DMAC_APB_HALFWORD_WR_CH_EN;
412
413 val = ioread32(chan->chip->apb_regs + offset);
414
415 if (set)
416 val |= BIT(chan->id);
417 else
418 val &= ~BIT(chan->id);
419
420 iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
423static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
424 struct axi_dma_desc *first)
425{
426 u32 priority = chan->chip->dw->hdata->priority[chan->id];
427 struct axi_dma_chan_config config = {};
428 u32 irq_mask;
429 u8 lms = 0; /* Select AXI0 master for LLI fetching */
430
431 if (unlikely(axi_chan_is_hw_enable(chan))) {
432 dev_err(chan2dev(chan), "%s is non-idle!\n",
433 axi_chan_name(chan));
434
435 return;
436 }
437
	axi_dma_enable(chan->chip);
439
440 config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
441 config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
442 config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
443 config.prior = priority;
444 config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
445 config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
446 switch (chan->direction) {
447 case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
449 config.tt_fc = chan->config.device_fc ?
450 DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
451 DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
452 if (chan->chip->apb_regs)
453 config.dst_per = chan->id;
454 else
455 config.dst_per = chan->hw_handshake_num;
456 break;
457 case DMA_DEV_TO_MEM:
458 config.tt_fc = chan->config.device_fc ?
459 DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
460 DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
461 if (chan->chip->apb_regs)
462 config.src_per = chan->id;
463 else
464 config.src_per = chan->hw_handshake_num;
465 break;
466 default:
467 break;
468 }
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);
472
473 irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
474 axi_chan_irq_sig_set(chan, irq_mask);
475
476 /* Generate 'suspend' status but don't generate interrupt */
477 irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
478 axi_chan_irq_set(chan, irq_mask);
479
480 axi_chan_enable(chan);
481}
482
483static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
484{
485 struct axi_dma_desc *desc;
486 struct virt_dma_desc *vd;
487
	vd = vchan_next_desc(&chan->vc);
489 if (!vd)
490 return;
491
492 desc = vd_to_axi_desc(vd);
493 dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
494 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
496}
497
498static void dma_chan_issue_pending(struct dma_chan *dchan)
499{
500 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
501 unsigned long flags;
502
	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
507}
508
509static void dw_axi_dma_synchronize(struct dma_chan *dchan)
510{
511 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
512
	vchan_synchronize(&chan->vc);
514}
515
516static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
517{
518 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
519
520 /* ASSERT: channel is idle */
521 if (axi_chan_is_hw_enable(chan)) {
522 dev_err(chan2dev(chan), "%s is non-idle!\n",
523 axi_chan_name(chan));
524 return -EBUSY;
525 }
526
527 /* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
532 if (!chan->desc_pool) {
533 dev_err(chan2dev(chan), "No memory for descriptors\n");
534 return -ENOMEM;
535 }
536 dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
537
	pm_runtime_get(chan->chip->dev);
539
540 return 0;
541}
542
543static void dma_chan_free_chan_resources(struct dma_chan *dchan)
544{
545 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
546
547 /* ASSERT: channel is idle */
548 if (axi_chan_is_hw_enable(chan))
549 dev_err(dchan2dev(dchan), "%s is non-idle!\n",
550 axi_chan_name(chan));
551
552 axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
558 chan->desc_pool = NULL;
559 dev_vdbg(dchan2dev(dchan),
560 "%s: free resources, descriptor still allocated: %u\n",
561 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
562
	pm_runtime_put(chan->chip->dev);
564}
565
566static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
567{
568 struct axi_dma_chip *chip = chan->chip;
569 unsigned long reg_value, val;
570
571 if (!chip->apb_regs) {
572 dev_err(chip->dev, "apb_regs not initialized\n");
573 return;
574 }
575
	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to the channel.
	 * Unlock the DMA channel by assigning 0x3F to the channel.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated, so set the handshake field that
	 * corresponds to this channel ID; one 64-bit write covers the fields
	 * of all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}
598
599/*
600 * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
601 * as 1, it understands that the current block is the final block in the
602 * transfer and completes the DMA transfer operation at the end of current
603 * block transfer.
604 */
605static void set_desc_last(struct axi_dma_hw_desc *desc)
606{
607 u32 val;
608
609 val = le32_to_cpu(desc->lli->ctl_hi);
610 val |= CH_CTL_H_LLI_LAST;
611 desc->lli->ctl_hi = cpu_to_le32(val);
612}
613
614static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
615{
616 desc->lli->sar = cpu_to_le64(adr);
617}
618
619static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
620{
621 desc->lli->dar = cpu_to_le64(adr);
622}
623
624static void set_desc_src_master(struct axi_dma_hw_desc *desc)
625{
626 u32 val;
627
628 /* Select AXI0 for source master */
629 val = le32_to_cpu(desc->lli->ctl_lo);
630 val &= ~CH_CTL_L_SRC_MAST;
631 desc->lli->ctl_lo = cpu_to_le32(val);
632}
633
634static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
635 struct axi_dma_desc *desc)
636{
637 u32 val;
638
	/* Select AXI1 for destination master if available */
640 val = le32_to_cpu(hw_desc->lli->ctl_lo);
641 if (desc->chan->chip->dw->hdata->nr_masters > 1)
642 val |= CH_CTL_L_DST_MAST;
643 else
644 val &= ~CH_CTL_L_DST_MAST;
645
646 hw_desc->lli->ctl_lo = cpu_to_le32(val);
647}
648
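/*
 * Fill one hardware LLI for a slave segment: the memory-side transfer width
 * is picked from the buffer alignment (capped at 32 bits), the register-side
 * width comes from the slave config, and segments whose BLOCK_TS would exceed
 * the per-channel maximum block size are rejected.
 */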
649static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
650 struct axi_dma_hw_desc *hw_desc,
651 dma_addr_t mem_addr, size_t len)
652{
653 unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
654 unsigned int reg_width;
655 unsigned int mem_width;
656 dma_addr_t device_addr;
657 size_t axi_block_ts;
658 size_t block_ts;
659 u32 ctllo, ctlhi;
660 u32 burst_len;
661
662 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
663
664 mem_width = __ffs(data_width | mem_addr | len);
665 if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
666 mem_width = DWAXIDMAC_TRANS_WIDTH_32;
667
668 if (!IS_ALIGNED(mem_addr, 4)) {
669 dev_err(chan->chip->dev, "invalid buffer alignment\n");
670 return -EINVAL;
671 }
672
673 switch (chan->direction) {
674 case DMA_MEM_TO_DEV:
675 reg_width = __ffs(chan->config.dst_addr_width);
676 device_addr = chan->config.dst_addr;
677 ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
678 mem_width << CH_CTL_L_SRC_WIDTH_POS |
679 DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
680 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
681 block_ts = len >> mem_width;
682 break;
683 case DMA_DEV_TO_MEM:
684 reg_width = __ffs(chan->config.src_addr_width);
685 device_addr = chan->config.src_addr;
686 ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
687 mem_width << CH_CTL_L_DST_WIDTH_POS |
688 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
689 DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
690 block_ts = len >> reg_width;
691 break;
692 default:
693 return -EINVAL;
694 }
695
696 if (block_ts > axi_block_ts)
697 return -EINVAL;
698
	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
700 if (unlikely(!hw_desc->lli))
701 return -ENOMEM;
702
703 ctlhi = CH_CTL_H_LLI_VALID;
704
705 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
706 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
707 ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
708 burst_len << CH_CTL_H_ARLEN_POS |
709 burst_len << CH_CTL_H_AWLEN_POS;
710 }
711
712 hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
713
	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}
721
722 hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
723
724 ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
725 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
726 hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
727
728 set_desc_src_master(hw_desc);
729
730 hw_desc->len = len;
731 return 0;
732}
733
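/*
 * Upper bound, in bytes, of one hardware block for this channel: the
 * per-channel maximum BLOCK_TS multiplied by the transfer width (memory-side
 * width for MEM_TO_DEV, register width for DEV_TO_MEM).
 */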
734static size_t calculate_block_len(struct axi_dma_chan *chan,
735 dma_addr_t dma_addr, size_t buf_len,
736 enum dma_transfer_direction direction)
737{
738 u32 data_width, reg_width, mem_width;
739 size_t axi_block_ts, block_len;
740
741 axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
742
743 switch (direction) {
744 case DMA_MEM_TO_DEV:
745 data_width = BIT(chan->chip->dw->hdata->m_data_width);
746 mem_width = __ffs(data_width | dma_addr | buf_len);
747 if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
748 mem_width = DWAXIDMAC_TRANS_WIDTH_32;
749
750 block_len = axi_block_ts << mem_width;
751 break;
752 case DMA_DEV_TO_MEM:
753 reg_width = __ffs(chan->config.src_addr_width);
754 block_len = axi_block_ts << reg_width;
755 break;
756 default:
757 block_len = 0;
758 }
759
760 return block_len;
761}
762
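/*
 * Cyclic transfers are built as num_periods * num_segments LLIs, where each
 * period is split into segments no larger than the hardware block limit and
 * the last LLI links back to the first. Every segment is marked LLI_LAST so
 * block completion raises an interrupt; the completion handler re-validates
 * the LLI and re-enables the channel to keep the ring running.
 */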
763static struct dma_async_tx_descriptor *
764dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
765 size_t buf_len, size_t period_len,
766 enum dma_transfer_direction direction,
767 unsigned long flags)
768{
769 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
770 struct axi_dma_hw_desc *hw_desc = NULL;
771 struct axi_dma_desc *desc = NULL;
772 dma_addr_t src_addr = dma_addr;
773 u32 num_periods, num_segments;
774 size_t axi_block_len;
775 u32 total_segments;
776 u32 segment_len;
777 unsigned int i;
778 int status;
779 u64 llp = 0;
780 u8 lms = 0; /* Select AXI0 master for LLI fetching */
781
782 num_periods = buf_len / period_len;
783
784 axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
785 if (axi_block_len == 0)
786 return NULL;
787
788 num_segments = DIV_ROUND_UP(period_len, axi_block_len);
789 segment_len = DIV_ROUND_UP(period_len, num_segments);
790
791 total_segments = num_periods * num_segments;
792
	desc = axi_desc_alloc(total_segments);
794 if (unlikely(!desc))
795 goto err_desc_get;
796
797 chan->direction = direction;
798 desc->chan = chan;
799 chan->cyclic = true;
800 desc->length = 0;
801 desc->period_len = period_len;
802
803 for (i = 0; i < total_segments; i++) {
804 hw_desc = &desc->hw_desc[i];
805
		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
808 if (status < 0)
809 goto err_desc_get;
810
811 desc->length += hw_desc->len;
812 /* Set end-of-link to the linked descriptor, so that cyclic
813 * callback function can be triggered during interrupt.
814 */
815 set_desc_last(hw_desc);
816
817 src_addr += segment_len;
818 }
819
820 llp = desc->hw_desc[0].llp;
821
822 /* Managed transfer list */
823 do {
824 hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
832
833err_desc_get:
834 if (desc)
835 axi_desc_put(desc);
836
837 return NULL;
838}
839
840static struct dma_async_tx_descriptor *
841dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
842 unsigned int sg_len,
843 enum dma_transfer_direction direction,
844 unsigned long flags, void *context)
845{
846 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
847 struct axi_dma_hw_desc *hw_desc = NULL;
848 struct axi_dma_desc *desc = NULL;
849 u32 num_segments, segment_len;
850 unsigned int loop = 0;
851 struct scatterlist *sg;
852 size_t axi_block_len;
853 u32 len, num_sgs = 0;
854 unsigned int i;
855 dma_addr_t mem;
856 int status;
857 u64 llp = 0;
858 u8 lms = 0; /* Select AXI0 master for LLI fetching */
859
860 if (unlikely(!is_slave_direction(direction) || !sg_len))
861 return NULL;
862
863 mem = sg_dma_address(sgl);
864 len = sg_dma_len(sgl);
865
	axi_block_len = calculate_block_len(chan, mem, len, direction);
867 if (axi_block_len == 0)
868 return NULL;
869
870 for_each_sg(sgl, sg, sg_len, i)
871 num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
872
	desc = axi_desc_alloc(num_sgs);
874 if (unlikely(!desc))
875 goto err_desc_get;
876
877 desc->chan = chan;
878 desc->length = 0;
879 chan->direction = direction;
880
881 for_each_sg(sgl, sg, sg_len, i) {
882 mem = sg_dma_address(sg);
883 len = sg_dma_len(sg);
884 num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
885 segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
886
887 do {
888 hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
890 if (status < 0)
891 goto err_desc_get;
892
893 desc->length += hw_desc->len;
894 len -= segment_len;
895 mem += segment_len;
896 } while (len >= segment_len);
897 }
898
899 /* Set end-of-link to the last link descriptor of list */
900 set_desc_last(&desc->hw_desc[num_sgs - 1]);
901
902 /* Managed transfer list */
903 do {
904 hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
912
913err_desc_get:
914 if (desc)
915 axi_desc_put(desc);
916
917 return NULL;
918}
919
920static struct dma_async_tx_descriptor *
921dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
922 dma_addr_t src_adr, size_t len, unsigned long flags)
923{
924 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
925 size_t block_ts, max_block_ts, xfer_len;
926 struct axi_dma_hw_desc *hw_desc = NULL;
927 struct axi_dma_desc *desc = NULL;
928 u32 xfer_width, reg, num;
929 u64 llp = 0;
930 u8 lms = 0; /* Select AXI0 master for LLI fetching */
931
932 dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
933 axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
934
935 max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
937 num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
938 desc = axi_desc_alloc(num);
939 if (unlikely(!desc))
940 goto err_desc_get;
941
942 desc->chan = chan;
943 num = 0;
944 desc->length = 0;
945 while (len) {
946 xfer_len = len;
947
948 hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment.
		 * The source and destination widths could actually differ,
		 * but keep them the same for simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data items of
		 * xfer_width to be transferred in one DMA block transfer.
		 * The BLOCK_TS register must be programmed to block_ts - 1.
		 */
961 block_ts = xfer_len >> xfer_width;
962 if (block_ts > max_block_ts) {
963 block_ts = max_block_ts;
964 xfer_len = max_block_ts << xfer_width;
965 }
966
		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
974
975 reg = CH_CTL_H_LLI_VALID;
976 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
977 u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
978
979 reg |= (CH_CTL_H_ARLEN_EN |
980 burst_len << CH_CTL_H_ARLEN_POS |
981 CH_CTL_H_AWLEN_EN |
982 burst_len << CH_CTL_H_AWLEN_POS);
983 }
984 hw_desc->lli->ctl_hi = cpu_to_le32(reg);
985
986 reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
987 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
988 xfer_width << CH_CTL_L_DST_WIDTH_POS |
989 xfer_width << CH_CTL_L_SRC_WIDTH_POS |
990 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
991 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
992 hw_desc->lli->ctl_lo = cpu_to_le32(reg);
993
994 set_desc_src_master(hw_desc);
995 set_desc_dest_master(hw_desc, desc);
996
997 hw_desc->len = xfer_len;
998 desc->length += hw_desc->len;
999 /* update the length and addresses for the next loop cycle */
1000 len -= xfer_len;
1001 dst_adr += xfer_len;
1002 src_adr += xfer_len;
1003 num++;
1004 }
1005
1006 /* Set end-of-link to the last link descriptor of list */
1007 set_desc_last(&desc->hw_desc[num - 1]);
1008 /* Managed transfer list */
1009 do {
1010 hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
1016
1017err_desc_get:
1018 if (desc)
1019 axi_desc_put(desc);
1020 return NULL;
1021}
1022
1023static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
1024 struct dma_slave_config *config)
1025{
1026 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1027
1028 memcpy(&chan->config, config, sizeof(*config));
1029
1030 return 0;
1031}
1032
1033static void axi_chan_dump_lli(struct axi_dma_chan *chan,
1034 struct axi_dma_hw_desc *desc)
1035{
1036 if (!desc->lli) {
1037 dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
1038 return;
1039 }
1040
1041 dev_err(dchan2dev(&chan->vc.chan),
1042 "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
1043 le64_to_cpu(desc->lli->sar),
1044 le64_to_cpu(desc->lli->dar),
1045 le64_to_cpu(desc->lli->llp),
1046 le32_to_cpu(desc->lli->block_ts_lo),
1047 le32_to_cpu(desc->lli->ctl_hi),
1048 le32_to_cpu(desc->lli->ctl_lo));
1049}
1050
1051static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
1052 struct axi_dma_desc *desc_head)
1053{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
1059}
1060
1061static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
1062{
1063 struct virt_dma_desc *vd;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&chan->vc.lock, flags);
1067
1068 axi_chan_disable(chan);
1069
1070 /* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
1072 if (!vd) {
1073 dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1074 axi_chan_name(chan));
1075 goto out;
1076 }
1077 /* Remove the completed descriptor from issued list */
	list_del(&vd->node);
1079
1080 /* WARN about bad descriptor */
1081 dev_err(chan2dev(chan),
1082 "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
1083 axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
1085
1086 vchan_cookie_complete(vd);
1087
1088 /* Try to restart the controller */
1089 axi_chan_start_first_queued(chan);
1090
1091out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1093}
1094
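/*
 * Transfer-complete handling. For cyclic channels the current CH_LLP is
 * matched against the ring of LLIs to find how far the hardware got; that LLI
 * is re-marked valid and the channel is re-enabled, and the cyclic callback
 * fires once per elapsed period. For ordinary transfers the descriptor at the
 * head of the issued list is completed.
 */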
1095static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
1096{
	int count = atomic_read(&chan->descs_allocated);
1098 struct axi_dma_hw_desc *hw_desc;
1099 struct axi_dma_desc *desc;
1100 struct virt_dma_desc *vd;
1101 unsigned long flags;
1102 u64 llp;
1103 int i;
1104
1105 spin_lock_irqsave(&chan->vc.lock, flags);
1106 if (unlikely(axi_chan_is_hw_enable(chan))) {
1107 dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
1108 axi_chan_name(chan));
1109 axi_chan_disable(chan);
1110 }
1111
1112 /* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
1114 if (!vd) {
1115 dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
1116 axi_chan_name(chan));
1117 goto out;
1118 }
1119
1120 if (chan->cyclic) {
1121 desc = vd_to_axi_desc(vd);
1122 if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
1128 hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
1129 desc->completed_blocks = i;
1130
1131 if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
1132 vchan_cyclic_callback(vd);
1133 break;
1134 }
1135 }
1136
1137 axi_chan_enable(chan);
1138 }
1139 } else {
1140 /* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
1142 vchan_cookie_complete(vd);
1143 }
1144
1145out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1147}
1148
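/*
 * Common interrupt handler for all channels: DMAC interrupts are masked while
 * each channel's status is read and cleared, errors are routed to
 * axi_chan_handle_err() and normal completions to
 * axi_chan_block_xfer_complete(), then the interrupts are re-enabled.
 */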
1149static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
1150{
1151 struct axi_dma_chip *chip = dev_id;
1152 struct dw_axi_dma *dw = chip->dw;
1153 struct axi_dma_chan *chan;
1154
1155 u32 status, i;
1156
1157 /* Disable DMAC interrupts. We'll enable them after processing channels */
1158 axi_dma_irq_disable(chip);
1159
1160 /* Poll, clear and process every channel interrupt status */
1161 for (i = 0; i < dw->hdata->nr_channels; i++) {
1162 chan = &dw->chan[i];
1163 status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);
1165
1166 dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
1167 axi_chan_name(chan), i, status);
1168
1169 if (status & DWAXIDMAC_IRQ_ALL_ERR)
1170 axi_chan_handle_err(chan, status);
1171 else if (status & DWAXIDMAC_IRQ_DMA_TRF)
1172 axi_chan_block_xfer_complete(chan);
1173 }
1174
1175 /* Re-enable interrupts */
1176 axi_dma_irq_enable(chip);
1177
1178 return IRQ_HANDLED;
1179}
1180
1181static int dma_chan_terminate_all(struct dma_chan *dchan)
1182{
1183 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1184 u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
1185 unsigned long flags;
1186 u32 val;
1187 int ret;
1188 LIST_HEAD(head);
1189
1190 axi_chan_disable(chan);
1191
1192 ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
1193 !(val & chan_active), 1000, 50000);
1194 if (ret == -ETIMEDOUT)
1195 dev_warn(dchan2dev(dchan),
1196 "%s failed to stop\n", axi_chan_name(chan));
1197
	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
1211
1212 dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
1213
1214 return 0;
1215}
1216
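/*
 * Pause by setting the channel's suspend request (together with its
 * write-enable bit) in the register that matches the register map, then
 * busy-wait for the SUSPENDED status with roughly a 20 * 2us polling budget
 * before giving up with -EAGAIN.
 */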
1217static int dma_chan_pause(struct dma_chan *dchan)
1218{
1219 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1220 unsigned long flags;
1221 unsigned int timeout = 20; /* timeout iterations */
1222 u64 val;
1223
1224 spin_lock_irqsave(&chan->vc.lock, flags);
1225
	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}
1251
1252 do {
1253 if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
1254 break;
1255
		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);
1264
1265 return timeout ? 0 : -EAGAIN;
1266}
1267
1268/* Called in chan locked context */
1269static inline void axi_chan_resume(struct axi_dma_chan *chan)
1270{
1271 u64 val;
1272
	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}
1298
1299 chan->is_paused = false;
1300}
1301
1302static int dma_chan_resume(struct dma_chan *dchan)
1303{
1304 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
1305 unsigned long flags;
1306
1307 spin_lock_irqsave(&chan->vc.lock, flags);
1308
1309 if (chan->is_paused)
1310 axi_chan_resume(chan);
1311
	spin_unlock_irqrestore(&chan->vc.lock, flags);
1313
1314 return 0;
1315}
1316
1317static int axi_dma_suspend(struct axi_dma_chip *chip)
1318{
1319 axi_dma_irq_disable(chip);
1320 axi_dma_disable(chip);
1321
	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);
1324
1325 return 0;
1326}
1327
1328static int axi_dma_resume(struct axi_dma_chip *chip)
1329{
1330 int ret;
1331
	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;
1339
1340 axi_dma_enable(chip);
1341 axi_dma_irq_enable(chip);
1342
1343 return 0;
1344}
1345
1346static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
1347{
1348 struct axi_dma_chip *chip = dev_get_drvdata(dev);
1349
1350 return axi_dma_suspend(chip);
1351}
1352
1353static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
1354{
1355 struct axi_dma_chip *chip = dev_get_drvdata(dev);
1356
1357 return axi_dma_resume(chip);
1358}
1359
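/*
 * #dma-cells is expected to be 1 here: the single cell carries the hardware
 * handshake number, which is stored on the channel and later programmed as
 * SRC_PER/DST_PER (or into the APB handshake mux).
 */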
1360static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
1361 struct of_dma *ofdma)
1362{
1363 struct dw_axi_dma *dw = ofdma->of_dma_data;
1364 struct axi_dma_chan *chan;
1365 struct dma_chan *dchan;
1366
	dchan = dma_get_any_slave_channel(&dw->dma);
1368 if (!dchan)
1369 return NULL;
1370
1371 chan = dchan_to_axi_dma_chan(dchan);
1372 chan->hw_handshake_num = dma_spec->args[0];
1373 return dchan;
1374}
1375
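/*
 * A minimal, purely illustrative set of the properties consumed below
 * (names as read by this function; the values are made up):
 *
 *	dma-channels = <4>;
 *	snps,dma-masters = <2>;
 *	snps,data-width = <4>;
 *	snps,block-size = <4096 4096 4096 4096>;
 *	snps,priority = <0 1 2 3>;
 *	snps,axi-max-burst-len = <16>;	(optional)
 */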
1376static int parse_device_properties(struct axi_dma_chip *chip)
1377{
1378 struct device *dev = chip->dev;
1379 u32 tmp, carr[DMAC_MAX_CHANNELS];
1380 int ret;
1381
	ret = device_property_read_u32(dev, "dma-channels", &tmp);
1383 if (ret)
1384 return ret;
1385 if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
1386 return -EINVAL;
1387
1388 chip->dw->hdata->nr_channels = tmp;
1389 if (tmp <= DMA_REG_MAP_CH_REF)
1390 chip->dw->hdata->reg_map_8_channels = true;
1391
	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
1393 if (ret)
1394 return ret;
1395 if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
1396 return -EINVAL;
1397
1398 chip->dw->hdata->nr_masters = tmp;
1399
	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
1401 if (ret)
1402 return ret;
1403 if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
1404 return -EINVAL;
1405
1406 chip->dw->hdata->m_data_width = tmp;
1407
	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
1410 if (ret)
1411 return ret;
1412 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1413 if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
1414 return -EINVAL;
1415
1416 chip->dw->hdata->block_size[tmp] = carr[tmp];
1417 }
1418
	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
1421 if (ret)
1422 return ret;
1423 /* Priority value must be programmed within [0:nr_channels-1] range */
1424 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
1425 if (carr[tmp] >= chip->dw->hdata->nr_channels)
1426 return -EINVAL;
1427
1428 chip->dw->hdata->priority[tmp] = carr[tmp];
1429 }
1430
	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
1433 if (!ret) {
1434 if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
1435 return -EINVAL;
1436 if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
1437 return -EINVAL;
1438
1439 chip->dw->hdata->restrict_axi_burst_len = true;
1440 chip->dw->hdata->axi_rw_burst_len = tmp;
1441 }
1442
1443 return 0;
1444}
1445
1446static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
1447{
1448 int irq_count = platform_irq_count(pdev);
1449 int ret;
1450
1451 for (int i = 0; i < irq_count; i++) {
1452 chip->irq[i] = platform_get_irq(pdev, i);
1453 if (chip->irq[i] < 0)
1454 return chip->irq[i];
		ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
				       IRQF_SHARED, KBUILD_MODNAME, chip);
1457 if (ret < 0)
1458 return ret;
1459 }
1460
1461 return 0;
1462}
1463
1464static int dw_probe(struct platform_device *pdev)
1465{
1466 struct axi_dma_chip *chip;
1467 struct dw_axi_dma *dw;
1468 struct dw_axi_dma_hcfg *hdata;
1469 struct reset_control *resets;
1470 unsigned int flags;
1471 u32 i;
1472 int ret;
1473
	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;
1485
1486 chip->dw = dw;
1487 chip->dev = &pdev->dev;
1488 chip->dw->hdata = hdata;
1489
	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
1509 }
1510
1511 chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);
1512
	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
1527 if (!dw->chan)
1528 return -ENOMEM;
1529
1530 ret = axi_req_irqs(pdev, chip);
1531 if (ret)
1532 return ret;
1533
	INIT_LIST_HEAD(&dw->dma.channels);
1535 for (i = 0; i < hdata->nr_channels; i++) {
1536 struct axi_dma_chan *chan = &dw->chan[i];
1537
1538 chan->chip = chip;
1539 chan->id = i;
1540 chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
1545 }
1546
1547 /* Set capabilities */
1548 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1549 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1550 dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
1551
1552 /* DMA capabilities */
1553 dw->dma.max_burst = hdata->axi_rw_burst_len;
1554 dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
1555 dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
1556 dw->dma.directions = BIT(DMA_MEM_TO_MEM);
1557 dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1558 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1559
1560 dw->dma.dev = chip->dev;
1561 dw->dma.device_tx_status = dma_chan_tx_status;
1562 dw->dma.device_issue_pending = dma_chan_issue_pending;
1563 dw->dma.device_terminate_all = dma_chan_terminate_all;
1564 dw->dma.device_pause = dma_chan_pause;
1565 dw->dma.device_resume = dma_chan_resume;
1566
1567 dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
1568 dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
1569
1570 dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
1571 dw->dma.device_synchronize = dw_axi_dma_synchronize;
1572 dw->dma.device_config = dw_axi_dma_chan_slave_config;
1573 dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
1574 dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
1575
	/*
	 * The Synopsys DesignWare AXI DMA datasheet states that the maximum
	 * supported block size is 1024 transfers. The device register width
	 * is 4 bytes, so constrain the segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work even without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
1593 ret = axi_dma_resume(chip);
1594 if (ret < 0)
1595 goto err_pm_disable;
1596
1597 axi_dma_hw_init(chip);
1598
	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
1602 if (ret)
1603 goto err_pm_disable;
1604
1605 /* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
1608 if (ret < 0)
1609 dev_warn(&pdev->dev,
1610 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
1611
1612 dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
1613 dw->hdata->nr_channels);
1614
1615 return 0;
1616
1617err_pm_disable:
	pm_runtime_disable(chip->dev);
1619
1620 return ret;
1621}
1622
1623static void dw_remove(struct platform_device *pdev)
1624{
1625 struct axi_dma_chip *chip = platform_get_drvdata(pdev);
1626 struct dw_axi_dma *dw = chip->dw;
1627 struct axi_dma_chan *chan, *_chan;
1628 u32 i;
1629
	/* Enable clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	for (i = 0; i < DMAC_MAX_CHANNELS; i++)
		if (chip->irq[i] > 0)
			devm_free_irq(chip->dev, chip->irq[i], chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
			vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
1653 }
1654}
1655
1656static const struct dev_pm_ops dw_axi_dma_pm_ops = {
1657 SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
1658};
1659
1660static const struct of_device_id dw_dma_of_id_table[] = {
1661 {
1662 .compatible = "snps,axi-dma-1.01a"
1663 }, {
1664 .compatible = "intel,kmb-axi-dma",
1665 .data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
1666 }, {
1667 .compatible = "starfive,jh7110-axi-dma",
1668 .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
1669 }, {
1670 .compatible = "starfive,jh8100-axi-dma",
1671 .data = (void *)AXI_DMA_FLAG_HAS_RESETS,
1672 },
1673 {}
1674};
1675MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
1676
1677static struct platform_driver dw_driver = {
1678 .probe = dw_probe,
1679 .remove = dw_remove,
1680 .driver = {
1681 .name = KBUILD_MODNAME,
1682 .of_match_table = dw_dma_of_id_table,
1683 .pm = &dw_axi_dma_pm_ops,
1684 },
1685};
1686module_platform_driver(dw_driver);
1687
1688MODULE_LICENSE("GPL v2");
1689MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
1690MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
1691
