// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *	   Peter Griffin <peter.griffin@linaro.org>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/slab.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
	return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct st_fdma_desc, vdesc);
}

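/*
 * Reserve the DMA request line configured for this channel. Returns the
 * dreq line number on success, or -EINVAL if the requested line is out of
 * range or already in use.
 */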
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;
	u32 req_line_cfg = fchan->cfg.req_line;
	u32 dreq_line;
	int try = 0;

	/*
	 * The dreq_mask is shared by all channels of the FDMA, so accesses
	 * must be atomic. If dreq_mask changes between the availability
	 * check and test_and_set_bit(), retry.
	 */
	do {
		if (fdev->dreq_mask == ~0L) {
			dev_err(fdev->dev, "No req lines available\n");
			return -EINVAL;
		}

		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
			dev_err(fdev->dev, "Invalid or used req line\n");
			return -EINVAL;
		} else {
			dreq_line = req_line_cfg;
		}

		try++;
	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
		dreq_line, fdev->dreq_mask);

	return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
	struct st_fdma_dev *fdev = fchan->fdev;

	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

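/*
 * Start the hardware on the next queued descriptor, if any. Called with
 * the channel's vchan lock held.
 */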
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
	struct virt_dma_desc *vdesc;
	unsigned long nbytes, ch_cmd, cmd;

	vdesc = vchan_next_desc(&fchan->vchan);
	if (!vdesc)
		return;

	fchan->fdesc = to_st_fdma_desc(vdesc);
	nbytes = fchan->fdesc->node[0].desc->nbytes;
	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

	/* Start the channel for the descriptor */
	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
	writel(cmd, fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

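/*
 * Translate the channel command/status register and the interrupt status
 * into a dmaengine channel state (DMA_ERROR, DMA_PAUSED or DMA_IN_PROGRESS).
 */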
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
				  unsigned long int_sta)
{
	unsigned long ch_sta, ch_err;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
	ch_sta &= FDMA_CH_CMD_STA_MASK;

	if (int_sta & FDMA_INT_STA_ERR) {
		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
		fchan->status = DMA_ERROR;
		return;
	}

	switch (ch_sta) {
	case FDMA_CH_CMD_STA_PAUSED:
		fchan->status = DMA_PAUSED;
		break;

	case FDMA_CH_CMD_STA_RUNNING:
		fchan->status = DMA_IN_PROGRESS;
		break;
	}
}

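/*
 * The interrupt status register packs two bits per channel (completion and
 * error), so the handler advances one channel per two-bit shift of int_sta.
 */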
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
	struct st_fdma_dev *fdev = dev_id;
	irqreturn_t ret = IRQ_NONE;
	struct st_fdma_chan *fchan = &fdev->chans[0];
	unsigned long int_sta, clr;

	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
	clr = int_sta;

	for (; int_sta != 0; int_sta >>= 2, fchan++) {
		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
			continue;

		spin_lock(&fchan->vchan.lock);
		st_fdma_ch_sta_update(fchan, int_sta);

		if (fchan->fdesc) {
			if (!fchan->fdesc->iscyclic) {
				list_del(&fchan->fdesc->vdesc.node);
				vchan_cookie_complete(&fchan->fdesc->vdesc);
				fchan->fdesc = NULL;
				fchan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fchan->fdesc->vdesc);
			}

			/* Start the next descriptor (if available) */
			if (!fchan->fdesc)
				st_fdma_xfer_desc(fchan);
		}

		spin_unlock(&fchan->vchan.lock);
		ret = IRQ_HANDLED;
	}

	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

	return ret;
}

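/*
 * Translate a DT dma-spec into a channel: args[0] selects the request line,
 * the optional args[1] and args[2] carry the request control bits and the
 * transfer type. The SLIM core is booted on first channel request, so a
 * missing firmware (-ENOENT) is reported as -EPROBE_DEFER.
 */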
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct st_fdma_dev *fdev = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct st_fdma_chan *fchan;
	int ret;

	if (dma_spec->args_count < 1)
		return ERR_PTR(-EINVAL);

	if (fdev->dma_device.dev->of_node != dma_spec->np)
		return ERR_PTR(-EINVAL);

	ret = rproc_boot(fdev->slim_rproc->rproc);
	if (ret == -ENOENT)
		return ERR_PTR(-EPROBE_DEFER);
	else if (ret)
		return ERR_PTR(ret);

	chan = dma_get_any_slave_channel(&fdev->dma_device);
	if (!chan)
		goto err_chan;

	fchan = to_st_fdma_chan(chan);

	fchan->cfg.of_node = dma_spec->np;
	fchan->cfg.req_line = dma_spec->args[0];
	fchan->cfg.req_ctrl = 0;
	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

	if (dma_spec->args_count > 1)
		fchan->cfg.req_ctrl = dma_spec->args[1]
			& FDMA_REQ_CTRL_CFG_MASK;

	if (dma_spec->args_count > 2)
		fchan->cfg.type = dma_spec->args[2];

	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
		fchan->dreq_line = 0;
	} else {
		/*
		 * Keep the return value in a signed int: dreq_line is a u32,
		 * so IS_ERR_VALUE() on it would miss a negative errno.
		 */
		ret = st_fdma_dreq_get(fchan);
		if (ret < 0) {
			chan = ERR_PTR(ret);
			goto err_chan;
		}
		fchan->dreq_line = ret;
	}

	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

	return chan;

err_chan:
	rproc_shutdown(fdev->slim_rproc->rproc);
	return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
}

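/*
 * Allocate a descriptor with sg_len hardware nodes taken from the channel's
 * DMA pool. On failure, any nodes already allocated are returned to the pool.
 */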
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
					       int sg_len)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT);
	if (!fdesc)
		return NULL;

	fdesc->fchan = fchan;
	fdesc->n_nodes = sg_len;
	for (i = 0; i < sg_len; i++) {
		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
				GFP_NOWAIT, &fdesc->node[i].pdesc);
		if (!fdesc->node[i].desc)
			goto err;
	}
	return fdesc;

err:
	while (--i >= 0)
		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
	return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	/* Create the dma pool for descriptor allocation */
	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
					   fchan->fdev->dev,
					   sizeof(struct st_fdma_hw_node),
					   __alignof__(struct st_fdma_hw_node),
					   0);

	if (!fchan->node_pool) {
		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
		return -ENOMEM;
	}

	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
		fchan->vchan.chan.chan_id, fchan->cfg.type);

	return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
	unsigned long flags;

	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
		__func__, fchan->vchan.chan.chan_id);

	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
		st_fdma_dreq_put(fchan);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fchan->fdesc = NULL;
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	dma_pool_destroy(fchan->node_pool);
	fchan->node_pool = NULL;
	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

	rproc_shutdown(rproc);
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;
	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

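/*
 * Program the request control register from the slave config: transfer
 * direction (write-not-read bit), a load/store opcode matching the bus
 * width, and the number of operations per burst.
 */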
static int config_reqctrl(struct st_fdma_chan *fchan,
			  enum dma_transfer_direction direction)
{
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width;
	int ch_id = fchan->vchan.chan.chan_id;
	struct st_fdma_dev *fdev = fchan->fdev;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.src_maxburst;
		width = fchan->scfg.src_addr_width;
		addr = fchan->scfg.src_addr;
		break;

	case DMA_MEM_TO_DEV:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
		maxburst = fchan->scfg.dst_maxburst;
		width = fchan->scfg.dst_addr_width;
		addr = fchan->scfg.dst_addr;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
		break;

	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
		break;

	default:
		return -EINVAL;
	}

	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

	fchan->cfg.dev_addr = addr;
	fchan->cfg.dir = direction;

	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
		ch_id, addr, fchan->cfg.req_ctrl);

	return 0;
}

static void fill_hw_node(struct st_fdma_hw_node *hw_node,
			 struct st_fdma_chan *fchan,
			 enum dma_transfer_direction direction)
{
	if (direction == DMA_MEM_TO_DEV) {
		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
		hw_node->daddr = fchan->cfg.dev_addr;
	} else {
		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
		hw_node->saddr = fchan->cfg.dev_addr;
	}

	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;
}

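/*
 * Common validation for the slave prep callbacks: a non-NULL channel, a
 * non-zero length and a valid slave direction.
 */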
static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
		size_t len, enum dma_transfer_direction direction)
{
	struct st_fdma_chan *fchan;

	if (!chan || !len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	if (!is_slave_direction(direction)) {
		dev_err(fchan->fdev->dev, "bad direction?\n");
		return NULL;
	}

	return fchan;
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* The buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		/* Link the nodes in a ring so the transfer wraps around */
		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;
	struct scatterlist *sg;
	int i;

	fchan = st_fdma_prep_common(chan, sg_len, direction);
	if (!fchan)
		return NULL;

	if (!sgl)
		return NULL;

	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = false;

	for_each_sg(sgl, sg, sg_len, i) {
		hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = sg_dma_address(sg);
		else
			hw_node->daddr = sg_dma_address(sg);

		hw_node->nbytes = sg_dma_len(sg);
		hw_node->generic.length = sg_dma_len(sg);
	}

	/* Interrupt at the end of the last node */
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

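/*
 * Compute the residue by walking the descriptor nodes backwards: nodes the
 * hardware has not reached yet contribute their full size, and the node
 * currently being processed contributes the byte count left in FDMA_CNTN.
 */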
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
				   struct virt_dma_desc *vdesc,
				   bool in_progress)
{
	struct st_fdma_desc *fdesc = fchan->fdesc;
	size_t residue = 0;
	dma_addr_t cur_addr = 0;
	int i;

	if (in_progress) {
		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
		cur_addr &= FDMA_CH_CMD_DATA_MASK;
	}

	for (i = fchan->fdesc->n_nodes - 1; i >= 0; i--) {
		if (cur_addr == fdesc->node[i].pdesc) {
			residue += fnode_read(fchan, FDMA_CNTN_OFST);
			break;
		}
		residue += fdesc->node[i].desc->nbytes;
	}

	return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	vd = vchan_find_desc(&fchan->vchan, cookie);
	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
	else if (vd)
		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fchan->vchan.lock, flags);

	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
		st_fdma_xfer_desc(fchan);

	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

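/* Issue the FDMA PAUSE command for the channel if a transfer is in flight. */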
static int st_fdma_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc)
		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

static int st_fdma_resume(struct dma_chan *chan)
{
	unsigned long flags;
	unsigned long val;
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;

	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	if (fchan->fdesc) {
		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
		val &= FDMA_CH_CMD_DATA_MASK;
		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
	}
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);

	return 0;
}

static int st_fdma_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	LIST_HEAD(head);
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
	int ch_id = fchan->vchan.chan.chan_id;
	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

	spin_lock_irqsave(&fchan->vchan.lock, flags);
	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
	fchan->fdesc = NULL;
	vchan_get_all_descriptors(&fchan->vchan, &head);
	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fchan->vchan, &head);

	return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *slave_cfg)
{
	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
	return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
	.name = "STiH407",
	.id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
	.name = "STiH407",
	.id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
	.name = "STiH407",
	.id = 2,
};

static const struct of_device_id st_fdma_match[] = {
	{ .compatible = "st,stih407-fdma-mpe31-11",
	  .data = &fdma_mpe31_stih407_11 },
	{ .compatible = "st,stih407-fdma-mpe31-12",
	  .data = &fdma_mpe31_stih407_12 },
	{ .compatible = "st,stih407-fdma-mpe31-13",
	  .data = &fdma_mpe31_stih407_13 },
	{},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

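/*
 * Build the firmware name from the match data (e.g. "fdma_STiH407_0.elf")
 * and read the channel count from the "dma-channels" DT property.
 */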
static int st_fdma_parse_dt(struct platform_device *pdev,
			    const struct st_fdma_driverdata *drvdata,
			    struct st_fdma_dev *fdev)
{
	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
		 drvdata->name, drvdata->id);

	return of_property_read_u32(pdev->dev.of_node, "dma-channels",
				    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

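/*
 * Tear down the per-channel virt-dma state: unlink each channel from the
 * DMA device and kill its vchan tasklet.
 */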
static void st_fdma_free(struct st_fdma_dev *fdev)
{
	struct st_fdma_chan *fchan;
	int i;

	for (i = 0; i < fdev->nr_channels; i++) {
		fchan = &fdev->chans[i];
		list_del(&fchan->vchan.chan.device_node);
		tasklet_kill(&fchan->vchan.task);
	}
}

static int st_fdma_probe(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev;
	struct device_node *np = pdev->dev.of_node;
	const struct st_fdma_driverdata *drvdata;
	int ret, i;

	drvdata = device_get_match_data(&pdev->dev);

	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
	if (ret) {
		dev_err(&pdev->dev, "unable to find platform data\n");
		goto err;
	}

	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
				   sizeof(struct st_fdma_chan), GFP_KERNEL);
	if (!fdev->chans)
		return -ENOMEM;

	fdev->dev = &pdev->dev;
	fdev->drvdata = drvdata;
	platform_set_drvdata(pdev, fdev);

	fdev->irq = platform_get_irq(pdev, 0);
	if (fdev->irq < 0)
		return -EINVAL;

	ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
			       dev_name(&pdev->dev), fdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
		goto err;
	}

	fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
	if (IS_ERR(fdev->slim_rproc)) {
		ret = PTR_ERR(fdev->slim_rproc);
		dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
		goto err;
	}

	/* Initialise list of FDMA channels */
	INIT_LIST_HEAD(&fdev->dma_device.channels);
	for (i = 0; i < fdev->nr_channels; i++) {
		struct st_fdma_chan *fchan = &fdev->chans[i];

		fchan->fdev = fdev;
		fchan->vchan.desc_free = st_fdma_free_desc;
		vchan_init(&fchan->vchan, &fdev->dma_device);
	}

	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
	fdev->dreq_mask = BIT(0) | BIT(31);

	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

	fdev->dma_device.dev = &pdev->dev;
	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
	fdev->dma_device.device_tx_status = st_fdma_tx_status;
	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
	fdev->dma_device.device_config = st_fdma_slave_config;
	fdev->dma_device.device_pause = st_fdma_pause;
	fdev->dma_device.device_resume = st_fdma_resume;

	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	ret = dmaenginem_async_device_register(&fdev->dma_device);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register DMA device (%d)\n", ret);
		goto err_rproc;
	}

	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register controller (%d)\n", ret);
		goto err_rproc;
	}

	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

	return 0;

err_rproc:
	st_fdma_free(fdev);
	st_slim_rproc_put(fdev->slim_rproc);
err:
	return ret;
}

static void st_fdma_remove(struct platform_device *pdev)
{
	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

	devm_free_irq(&pdev->dev, fdev->irq, fdev);
	st_slim_rproc_put(fdev->slim_rproc);
	of_dma_controller_free(pdev->dev.of_node);
}

static struct platform_driver st_fdma_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = st_fdma_match,
	},
	.probe = st_fdma_probe,
	.remove_new = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic Barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);