// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */

#define AXI_DMAC_REG_INTERFACE_DESC 0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC 0x14
#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88

#define AXI_DMAC_REG_CTRL 0x400
#define AXI_DMAC_REG_TRANSFER_ID 0x404
#define AXI_DMAC_REG_START_TRANSFER 0x408
#define AXI_DMAC_REG_FLAGS 0x40c
#define AXI_DMAC_REG_DEST_ADDRESS 0x410
#define AXI_DMAC_REG_SRC_ADDRESS 0x414
#define AXI_DMAC_REG_X_LENGTH 0x418
#define AXI_DMAC_REG_Y_LENGTH 0x41c
#define AXI_DMAC_REG_DEST_STRIDE 0x420
#define AXI_DMAC_REG_SRC_STRIDE 0x424
#define AXI_DMAC_REG_TRANSFER_DONE 0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS 0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
#define AXI_DMAC_REG_SG_ADDRESS 0x47c
#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc

#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)
#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)

#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)

#define AXI_DMAC_FLAG_CYCLIC BIT(0)
#define AXI_DMAC_FLAG_LAST BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

/* Flags for axi_dmac_hw_desc.flags */
#define AXI_DMAC_HW_FLAG_LAST BIT(0)
#define AXI_DMAC_HW_FLAG_IRQ BIT(1)

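/*
 * In-memory layout of a hardware scatter-gather descriptor. The descriptors
 * live in coherent DMA memory and are fetched by the scatter-gather engine
 * itself via AXI_DMAC_REG_SG_ADDRESS and the next_sg_addr links, so the field
 * order is fixed by the hardware; the trailing padding keeps each descriptor
 * at 64 bytes.
 */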
struct axi_dmac_hw_desc {
	u32 flags;
	u32 id;
	u64 dest_addr;
	u64 src_addr;
	u64 next_sg_addr;
	u32 y_len;
	u32 x_len;
	u32 src_stride;
	u32 dst_stride;
	u64 __pad[2];
};

struct axi_dmac_sg {
	unsigned int partial_len;
	bool schedule_when_free;

	struct axi_dmac_hw_desc *hw;
	dma_addr_t hw_phys;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	struct axi_dmac_chan *chan;

	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[] __counted_by(num_sgs);
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
	bool hw_sg;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	if (!chan->hw_sg) {
		val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
		if (val) /* Queue is full, wait for the next SOT IRQ */
			return;
	}

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	if (chan->hw_sg) {
		chan->next_desc = NULL;
	} else if (++desc->num_submitted == desc->num_sgs ||
		   desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (!chan->hw_sg) {
		if (axi_dmac_dest_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
		}

		if (axi_dmac_src_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
		}
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
		if (chan->hw_sg)
			desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
		else if (desc->num_sgs == 1)
			flags |= AXI_DMAC_FLAG_CYCLIC;
	}

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	if (chan->hw_sg) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
			       (u64)sg->hw_phys >> 32);
	} else {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
		axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
	}
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
					struct axi_dmac_desc, vdesc.node);
}

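/*
 * The hardware expects X and Y lengths to be programmed as length minus one
 * (see axi_dmac_fill_linear_sg() and axi_dmac_prep_interleaved()), hence the
 * +1 when converting a segment back to a byte count.
 */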
static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
	else
		return (sg->hw->x_len + 1);
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->hw->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->hw->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Not found partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	if (chan->hw_sg)
		return;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	if (chan->hw_sg) {
		if (active->cyclic) {
			vchan_cyclic_callback(&active->vdesc);
		} else {
			list_del(&active->vdesc.node);
			vchan_cookie_complete(&active->vdesc);
			active = axi_dmac_active_desc(chan);
			start_next = !!active;
		}
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
				break;
			if (!(BIT(sg->hw->id) & completed_transfers))
				break;
			active->num_completed++;
			sg->hw->id = AXI_DMAC_SG_UNUSED;
			if (sg->schedule_when_free) {
				sg->schedule_when_free = false;
				start_next = true;
			}

			if (sg->partial_len)
				axi_dmac_compute_residue(chan, active);

			if (active->cyclic)
				vchan_cyclic_callback(&active->vdesc);

			if (active->num_completed == active->num_sgs ||
			    sg->partial_len) {
				if (active->cyclic) {
					active->num_completed = 0; /* wrap around */
				} else {
					list_del(&active->vdesc.node);
					vchan_cookie_complete(&active->vdesc);
					active = axi_dmac_active_desc(chan);
				}
			}
		} while (active);
	}

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	u32 ctrl = AXI_DMAC_CTRL_ENABLE;

	if (chan->hw_sg)
		ctrl |= AXI_DMAC_CTRL_ENABLE_SG;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *
axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hws;
	struct axi_dmac_desc *desc;
	dma_addr_t hw_phys;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = num_sgs;
	desc->chan = chan;

	hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
				 &hw_phys, GFP_ATOMIC);
	if (!hws) {
		kfree(desc);
		return NULL;
	}

	for (i = 0; i < num_sgs; i++) {
		desc->sg[i].hw = &hws[i];
		desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);

		hws[i].id = AXI_DMAC_SG_UNUSED;
		hws[i].flags = 0;

		/* Link hardware descriptors */
		hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
	}
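	/*
	 * The link of the final entry points one descriptor past the end of
	 * the array. This is never fetched: the engine stops at the entry
	 * flagged AXI_DMAC_HW_FLAG_LAST (set below), and the cyclic path in
	 * axi_dmac_prep_dma_cyclic() rewires the link to the first descriptor
	 * instead.
	 */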

	/* The last hardware descriptor will trigger an interrupt */
	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;

	return desc;
}

static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
	dma_addr_t hw_phys = desc->sg[0].hw_phys;

	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
			  hw, hw_phys);
	kfree(desc);
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Round the segment size up to the next multiple of the alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

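	/*
	 * Emit full segments while more than segment_size bytes of the period
	 * remain; the remainder (at most segment_size bytes) is written by
	 * the tail below.
	 */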
	for (i = 0; i < num_periods; i++) {
		for (len = period_len; len > segment_size; sg++) {
			if (direction == DMA_DEV_TO_MEM)
				sg->hw->dest_addr = addr;
			else
				sg->hw->src_addr = addr;
			sg->hw->x_len = segment_size - 1;
			sg->hw->y_len = 0;
			sg->hw->flags = 0;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->hw->dest_addr = addr;
		else
			sg->hw->src_addr = addr;
		sg->hw->x_len = len - 1;
		sg->hw->y_len = 0;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			axi_dmac_free_desc(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
					      sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments, num_sgs;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	num_sgs = num_periods * num_segments;

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	/* Chain the last descriptor to the first, and remove its "last" flag */
	desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
	desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
				period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
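		/* The division avoids overflow of xt->sgl[0].size * xt->numf below */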
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(chan, 1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].hw->src_addr = xt->src_start;
		desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].hw->dest_addr = xt->dst_start;
		desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
		desc->sg[0].hw->y_len = xt->numf - 1;
	} else {
		desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
		desc->sg[0].hw->y_len = 0;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
	case AXI_DMAC_REG_CURRENT_SG_ID:
	case AXI_DMAC_REG_SG_ADDRESS:
	case AXI_DMAC_REG_SG_ADDRESS_HIGH:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
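/*
 * For illustration only, a channel description in the devicetree might look
 * like this (the bus type constants come from <dt-bindings/dma/axi-dmac.h>;
 * the bus widths here are made up and depend on the design):
 *
 *	adi,channels {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		dma-channel@0 {
 *			reg = <0>;
 *			adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *			adi,source-bus-width = <64>;
 *			adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_STREAM>;
 *			adi,destination-bus-width = <64>;
 *		};
 *	};
 */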
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

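	/*
	 * Optional features are probed by writing a register and reading the
	 * value back: registers of features that are not implemented in this
	 * instance read back as zero, and the length registers only hold the
	 * bits the core was configured to support.
	 */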
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
		chan->hw_sg = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct regmap *regmap;
	unsigned int version;
	u32 irq_mask = 0;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	if (dmac->chan.hw_sg)
		irq_mask |= AXI_DMAC_IRQ_SOT;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			ret = -EINVAL;
			goto err_clk_disable;
		}
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static void axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove_new = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");