// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_dma.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c
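
/*
 * Per-channel register layout as used below: DCSR for channel n is at
 * DCSR + (n << 2), while the descriptor registers (DDADR/DSADR/DTADR/DCMD)
 * form a 16-byte block per channel starting at 0x200 + (n << 4).
 */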

#define DCSR_RUN	BIT(31)		/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)		/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)		/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)		/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)		/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)		/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)		/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)		/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)		/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)		/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)		/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)		/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)		/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)		/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)		/* The end of Receive */

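/*
 * Request-to-channel map registers: request lines 0-63 live in the bank
 * at offset 0x0100, lines 64 and above in the bank at 0x1100.
 */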
#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)		/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)		/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)		/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)		/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)		/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)		/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)		/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)		/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

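/*
 * Hardware descriptor, fetched by the controller from memory.
 * DDADR_DESCADDR masks out the low four bits, so descriptors must be at
 * least 16-byte aligned; 32-byte alignment is used here.
 */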
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;
	struct dma_slave_config slave_config;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx) \
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) \
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) \
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) \
	container_of(dmadev, struct mmp_pdma_device, device)

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction);

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

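	/* Route this channel's peripheral request line to the physical channel */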
	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

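/* Shared-IRQ handler: demultiplex DINT and dispatch each pending channel */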
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

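/* Clear the DRCMR mapping and detach the channel from its physical channel */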
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

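	/*
	 * Split the copy into a chain of descriptors, each carrying at most
	 * PDMA_MAX_DESC_BYTES.
	 */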
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	mmp_pdma_config_write(dchan, &chan->slave_config, dir);

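	/*
	 * Build one or more descriptors per scatterlist entry; entries larger
	 * than PDMA_MAX_DESC_BYTES are split across several descriptors.
	 */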
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

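	/*
	 * One descriptor per period: each has DCMD_ENDIRQEN set so every
	 * period raises an interrupt, and the last descriptor links back
	 * to the first.
	 */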
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

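	/* Translate the slave bus width and burst size into DCMD bits */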
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = direction;
	chan->dev_addr = addr;

	return 0;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
	return 0;
}

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

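/*
 * Walk the running chain and add up the bytes the hardware has not yet
 * transferred for the transaction identified by @cookie.
 */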
static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor which
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ we found that
		 * partially handled descriptor are still to be processed and
		 * are hence added to the residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Do call back
 * Start pending list
 */
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;
	struct dmaengine_desc_callback cb;

	if (chan->cyclic_first) {
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		dmaengine_desc_callback_invoke(&cb, NULL);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(txd, &cb);
		dmaengine_desc_callback_invoke(&cb, NULL);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static void mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
	struct mmp_pdma_phy *phy;
	int i, irq = 0, irq_num = 0;

	if (op->dev.of_node)
		of_dma_controller_free(op->dev.of_node);

	for (i = 0; i < pdev->dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	if (irq_num != pdev->dma_channels) {
		irq = platform_get_irq(op, 0);
		devm_free_irq(&op->dev, irq, pdev);
	} else {
		for (i = 0; i < pdev->dma_channels; i++) {
			phy = &pdev->phy[i];
			irq = platform_get_irq(op, i);
			devm_free_irq(&op->dev, irq, phy);
		}
	}

	dma_async_device_unregister(&pdev->device);
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	if (pdev->dev->of_node) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
					     &dma_channels);
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
	} else {
		dma_channels = 32;	/* default: 32 channels */
	}
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq_optional(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			dma_async_device_unregister(&pdev->device);
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove_new = mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");
1148 | |