// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host. A hypothetical
 * consumer-side usage sketch follows the includes below.
 */

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

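/*
 * Consumer-side sketch (not part of this driver): a minimal, hypothetical
 * example of driving one H2C channel through the generic dmaengine slave
 * API. The device pointer, the "h2c-0" channel name, the buffer and the
 * card address are assumptions and only apply if the client device is
 * listed in the xdma_platdata device_map handed to this platform driver.
 */
static int example_xdma_h2c_write(struct device *client_dev, dma_addr_t buf,
				  size_t len, phys_addr_t card_addr)
{
	struct dma_slave_config cfg = { .dst_addr = card_addr };
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret = 0;

	chan = dma_request_chan(client_dev, "h2c-0");	/* hypothetical name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);

	/* single already-mapped segment; real users would map a full sg table */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
out:
	dma_release_channel(chan);
	return ret;
}
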
/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void *virt_addr;
	dma_addr_t dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 * @last_interrupt: Completion signaled by the channel's last interrupt once a
 *		    stop has been requested
 * @stop_requested: Set when the channel has been asked to stop; cleared when a
 *		    new transfer is started
 */
struct xdma_chan {
	struct virt_dma_chan vchan;
	void *xdev_hdl;
	u32 base;
	struct dma_pool *desc_pool;
	bool busy;
	enum dma_transfer_direction dir;
	struct dma_slave_config cfg;
	u32 irq;
	struct completion last_interrupt;
	bool stop_requested;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @interleaved_dma: Interleaved DMA transfer
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 * @frames_left: Number of frames left in interleaved DMA transfer
 * @error: tx error flag
 */
struct xdma_desc {
	struct virt_dma_desc vdesc;
	struct xdma_chan *chan;
	enum dma_transfer_direction dir;
	struct xdma_desc_block *desc_blocks;
	u32 dblk_num;
	u32 desc_num;
	u32 completed_desc_num;
	bool cyclic;
	bool interleaved_dma;
	u32 periods;
	u32 period_size;
	u32 frames_left;
	bool error;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQ assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device *pdev;
	struct dma_device dma_dev;
	struct regmap *rmap;
	struct xdma_chan *h2c_chans;
	struct xdma_chan *c2h_chans;
	u32 h2c_chan_num;
	u32 c2h_chan_num;
	u32 irq_start;
	u32 irq_num;
	u32 status;
};

#define xdma_err(xdev, fmt, args...) \
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({ \
	typeof(_xd) (xd) = (_xd); \
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	struct xdma_hw_desc *desc;
	int i;

	block = sw_desc->desc_blocks;
	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
	}
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	desc->next_desc = cpu_to_le64(block->dma_addr);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	u32 control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	sw_desc->error = false;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * check if there is not any submitted descriptor or channel is busy.
	 * vchan lock should be held where this function is called.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	xchan->stop_requested = false;
	reinit_completion(&xchan->last_interrupt);

	return 0;
}

/**
 * xdma_xfer_stop - Stop DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
	struct xdma_device *xdev = xchan->xdev_hdl;

	/* clear run stop bit to prevent any further auto-triggering */
	return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			    CHAN_CTRL_RUN_STOP);
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if it is available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "does not probe any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;
		xchan->stop_requested = false;
		init_completion(&xchan->last_interrupt);

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_terminate_all - Terminate all transactions
 * @chan: DMA channel pointer
 */
static int xdma_terminate_all(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	xdma_xfer_stop(xdma_chan);

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	xdma_chan->busy = false;
	xdma_chan->stop_requested = true;
	vd = vchan_next_desc(&xdma_chan->vchan);
	if (vd) {
		list_del(&vd->node);
		dma_cookie_complete(&vd->tx);
		vchan_terminate_vdesc(vd);
	}
	vchan_get_all_descriptors(&xdma_chan->vchan, &head);
	list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return 0;
}

/**
 * xdma_synchronize - Synchronize terminated transactions
 * @chan: DMA channel pointer
 */
static void xdma_synchronize(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	int st = 0;

	/* If the engine continues running, wait for the last interrupt */
	regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
	if (st & XDMA_CHAN_STATUS_BUSY)
		wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));

	vchan_synchronize(&xdma_chan->vchan);
}

/**
 * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
 *		       More than one descriptor will be used if the size is bigger
 *		       than XDMA_DESC_BLEN_MAX.
 * @sw_desc: Descriptor container
 * @src_addr: First value for the ->src_addr field
 * @dst_addr: First value for the ->dst_addr field
 * @size: Size of the contiguous memory block
 * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
 */
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
				  u64 dst_addr, u32 size, u32 filled_descs_num)
{
	u32 left = size, len, desc_num = filled_descs_num;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;

	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
	desc = dblk->virt_addr;
	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
	do {
		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
		/* set hardware descriptor */
		desc->bytes = cpu_to_le32(len);
		desc->src_addr = cpu_to_le64(src_addr);
		desc->dst_addr = cpu_to_le64(dst_addr);
		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
			desc = (++dblk)->virt_addr;
		else
			desc++;

		src_addr += len;
		dst_addr += len;
		left -= len;
	} while (left);

	return desc_num - filled_descs_num;
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u32 desc_num = 0, i;
	u64 addr, dev_addr, *src, *dst;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;
	sw_desc->cyclic = false;
	sw_desc->interleaved_dma = false;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
		dev_addr += sg_dma_len(sg);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
		     size_t size, size_t period_size,
		     enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	unsigned int periods = size / period_size;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u64 addr, dev_addr, *src, *dst;
	u32 desc_num;
	unsigned int i;

	/*
	 * Simplify the whole logic by preventing an abnormally high number of
	 * periods and periods size.
	 */
	if (period_size > XDMA_DESC_BLEN_MAX) {
		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
		return NULL;
	}

	if (periods > XDMA_DESC_ADJACENT) {
		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
		return NULL;
	}

	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
	if (!sw_desc)
		return NULL;

	sw_desc->periods = periods;
	sw_desc->period_size = period_size;
	sw_desc->dir = dir;
	sw_desc->interleaved_dma = false;

	addr = address;
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for (i = 0; i < periods; i++) {
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
		addr += period_size;
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}
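
/*
 * Consumer-side sketch (not part of this driver): a hypothetical cyclic C2H
 * capture into a pre-mapped ring buffer through the generic dmaengine API.
 * Channel, buffer and callback names are assumptions; note the limits
 * enforced above (period size <= XDMA_DESC_BLEN_MAX and number of periods
 * <= XDMA_DESC_ADJACENT).
 */
static int example_xdma_c2h_cyclic(struct dma_chan *chan, dma_addr_t ring_buf,
				   size_t ring_len, size_t period_len,
				   dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, ring_buf, ring_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = period_done;	/* called once per completed period */
	tx->callback_param = arg;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}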

/**
 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
 * @chan: DMA channel
 * @xt: DMA transfer template
 * @flags: tx flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	int i;
	u32 desc_num = 0, period_size = 0;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_chan *xchan = to_xdma_chan(chan);
	struct xdma_desc *sw_desc;
	u64 src_addr, dst_addr;

	for (i = 0; i < xt->frame_size; ++i)
		desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xchan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = xt->dir;
	sw_desc->interleaved_dma = true;
	sw_desc->cyclic = flags & DMA_PREP_REPEAT;
	sw_desc->frames_left = xt->numf;
	sw_desc->periods = xt->numf;

	desc_num = 0;
	src_addr = xt->src_start;
	dst_addr = xt->dst_start;
	for (i = 0; i < xt->frame_size; ++i) {
		desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
		src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
								      xt->sgl[i].size : 0);
		dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
								      xt->sgl[i].size : 0);
		period_size += xt->sgl[i].size;
	}
	sw_desc->period_size = period_size;

	tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
	if (tx_desc)
		return tx_desc;

	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (!vd)
		goto out;

	desc = to_xdma_desc(vd);
	if (desc->error) {
		ret = DMA_ERROR;
	} else if (desc->cyclic) {
		period_idx = desc->completed_desc_num % desc->periods;
		residue = (desc->periods - period_idx) * desc->period_size;
		dma_set_residue(state, residue);
	}
out:
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return ret;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct virt_dma_desc *vd, *next_vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;
	bool repeat_tx;

	spin_lock(&xchan->vchan.lock);

	if (xchan->stop_requested)
		complete(&xchan->last_interrupt);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	/* Clear-on-read the status register */
	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
	if (ret)
		goto out;

	desc = to_xdma_desc(vd);

	st &= XDMA_CHAN_STATUS_MASK;
	if ((st & XDMA_CHAN_ERROR_MASK) ||
	    !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
		desc->error = true;
		xdma_err(xdev, "channel error, status register value: 0x%x", st);
		goto out;
	}

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	if (desc->interleaved_dma) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;
		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
			xdma_xfer_start(xchan);
			goto out;
		}

		/* last desc of any frame */
		desc->frames_left--;
		if (desc->frames_left)
			goto out;

		/* last desc of the last frame */
		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
		if (next_vd)
			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
		if (repeat_tx) {
			desc->frames_left = desc->periods;
			desc->completed_desc_num = 0;
			vchan_cyclic_callback(vd);
		} else {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
		xdma_xfer_start(xchan);
	} else if (!desc->cyclic) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;

		/* if all data blocks are transferred, remove and complete the request */
		if (desc->completed_desc_num == desc->desc_num) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			goto out;
		}

		if (desc->completed_desc_num > desc->desc_num ||
		    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
			goto out;

		/* transfer the rest of data */
		xdma_xfer_start(xchan);
	} else {
		desc->completed_desc_num = complete_desc_num;
		vchan_cyclic_callback(vd);
	}

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = xdma_tx_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
	xdev->dma_dev.device_synchronize = xdma_synchronize;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;
	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
	xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}


static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, xdma_id_table);

static struct platform_driver xdma_driver = {
	.driver = {
		.name = "xdma",
	},
	.id_table = xdma_id_table,
	.probe = xdma_probe,
	.remove = xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");