// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 */

#include <dt-bindings/dma/fsl-edma.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>

#include "fsl-edma-common.h"

static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

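/*
 * Legacy eDMA (eDMAv1/eDMAv2) controllers raise a single combined "edma-tx"
 * interrupt for all channels: read the INT status register, then acknowledge
 * and dispatch each pending channel in turn.
 */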
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}

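/*
 * eDMA3/eDMA4 controllers provide a dedicated interrupt per channel, so
 * dev_id is the channel itself and only its own ch_int status is serviced.
 */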
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_chan *fsl_chan = dev_id;
	unsigned int intr;

	intr = edma_readl_chreg(fsl_chan, ch_int);
	if (!intr)
		return IRQ_HANDLED;

	edma_writel_chreg(fsl_chan, 1, ch_int);

	fsl_edma_tx_chan_handler(fsl_chan);

	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}

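/*
 * Combined handler used when the transfer-complete and error interrupts
 * share a single line (see fsl_edma_irq_init()).
 */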
static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

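/*
 * Translate a two-cell DT dma specifier into a channel: cell 0 selects the
 * DMAMUX group the channel must belong to, cell 1 is the request source to
 * route through that mux. An illustrative consumer entry (controller name
 * and numbers are examples only) could look like:
 *
 *	dmas = <&edma0 0 24>;
 *	dma-names = "rx";
 */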
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

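/*
 * Translate a three-cell DT dma specifier for eDMA3/eDMA4: cell 0 is the
 * request source (or the fixed channel index when the controller has no
 * channel mux), cell 1 the channel priority, and cell 2 a bitmask of
 * FSL_EDMA_* flags from <dt-bindings/dma/fsl-edma.h>. An illustrative
 * specifier (values are examples only) could be:
 *
 *	dmas = <&edma3 5 0 FSL_EDMA_RX>;
 */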
static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	bool b_chmux;
	int i;

	if (dma_spec->args_count != 3)
		return NULL;

	b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
				 device_node) {

		if (chan->client_count)
			continue;

		fsl_chan = to_fsl_edma_chan(chan);
		i = fsl_chan - fsl_edma->chans;

		fsl_chan->priority = dma_spec->args[1];
		fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
		fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
		fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;

		if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
			continue;

		if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
			continue;

		if (!b_chmux && i == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			chan->device->privatecnt++;
			mutex_unlock(&fsl_edma->fsl_edma_mutex);
			return chan;
		} else if (b_chmux && !fsl_chan->srcid) {
			/* if the controller supports channel mux, pick a free channel */
			chan = dma_get_slave_channel(chan);
			chan->device->privatecnt++;
			fsl_chan->srcid = dma_spec->args[0];
			mutex_unlock(&fsl_edma->fsl_edma_mutex);
			return chan;
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {

		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		if (fsl_edma->chan_masked & BIT(i))
			continue;

		/* request channel irq */
		fsl_chan->txirq = platform_get_irq(pdev, i);
		if (fsl_chan->txirq < 0)
			return -EINVAL;

		ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
			fsl_edma3_tx_handler, IRQF_SHARED,
			fsl_chan->chan_name, fsl_chan);
		if (ret) {
			dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
	if (count <= 2) {
		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
		return -EINVAL;
	}
	/*
	 * i.MX7ULP has 16 independent channel interrupts plus one error
	 * interrupt. Two channels share one interrupt line, for example
	 * ch0/ch16, ch1/ch17, and so on. For now simply request the irqs
	 * without the IRQF_SHARED flag, since 16 channels are enough on
	 * i.MX7ULP, whose M4 domain owns some of the peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return -ENXIO;

		/* The last IRQ is for eDMA err */
		if (i == count - 1)
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_err_handler,
						0, "eDMA2-ERR", fsl_edma);
		else
			ret = devm_request_irq(&pdev->dev, irq,
						fsl_edma_tx_handler, 0,
						fsl_edma->chans[i].chan_name,
						fsl_edma);
		if (ret)
			return ret;
	}

	return 0;
}

static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

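/*
 * Per-SoC layout data: probe derives each channel's register addresses from
 * these fields, roughly membase + chreg_off + i * chreg_space_sz for the
 * channel/TCD registers and membase + mux_off + i * mux_skip for the channel
 * mux register (see the channel setup loop in fsl_edma_probe()).
 */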
static struct fsl_edma_drvdata vf610_data = {
	.dmamuxs = DMAMUX_NR,
	.flags = FSL_EDMA_DRV_WRAP_IO,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata ls1028a_data = {
	.dmamuxs = DMAMUX_NR,
	.flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
	.dmamuxs = 1,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
	.setup_irq = fsl_edma2_irq_init,
};

static struct fsl_edma_drvdata imx8qm_data = {
	.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx8qm_audio_data = {
	.flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data3 = {
	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data4 = {
	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
	.chreg_space_sz = 0x8000,
	.chreg_off = 0x10000,
	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
	.mux_skip = 0x8000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx95_data5 = {
	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
		 FSL_EDMA_DRV_TCD64,
	.chreg_space_sz = 0x8000,
	.chreg_off = 0x10000,
	.mux_off = 0x200,
	.mux_skip = sizeof(u32),
	.setup_irq = fsl_edma3_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
	{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
	{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
	{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
	{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
	{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

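/*
 * On controllers flagged with FSL_EDMA_DRV_HAS_PD (e.g. i.MX8QM) every
 * channel sits in its own power domain; attach each domain and link it to
 * the controller device so runtime PM can manage the channels individually.
 */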
static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	struct fsl_edma_chan *fsl_chan;
	struct device_link *link;
	struct device *pd_chan;
	struct device *dev;
	int i;

	dev = &pdev->dev;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		if (fsl_edma->chan_masked & BIT(i))
			continue;

		fsl_chan = &fsl_edma->chans[i];

		pd_chan = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR_OR_NULL(pd_chan)) {
			dev_err(dev, "Failed attach pd %d\n", i);
			return -EINVAL;
		}

		link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME |
					     DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(dev, "Failed to add device_link to %d\n", i);
			return -EINVAL;
		}

		fsl_chan->pd_dev = pd_chan;

		pm_runtime_use_autosuspend(fsl_chan->pd_dev);
		pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
		pm_runtime_set_active(fsl_chan->pd_dev);
	}

	return 0;
}

static int fsl_edma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	const struct fsl_edma_drvdata *drvdata = NULL;
	u32 chan_mask[2] = {0, 0};
	struct edma_regs *regs;
	int chans;
	int ret, i;

	drvdata = device_get_match_data(&pdev->dev);
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
				GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->drvdata = drvdata;
	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
		fsl_edma_setup_regs(fsl_edma);
		regs = &fsl_edma->regs;
	}

	if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
		fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
		if (IS_ERR(fsl_edma->dmaclk)) {
			dev_err(&pdev->dev, "Missing DMA block clock.\n");
			return PTR_ERR(fsl_edma->dmaclk);
		}
	}

	if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
		fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
		if (IS_ERR(fsl_edma->chclk)) {
			dev_err(&pdev->dev, "Missing MP block clock.\n");
			return PTR_ERR(fsl_edma->chclk);
		}
	}

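	/*
	 * "dma-channel-mask" may carry one or two u32 cells; they are folded
	 * into the 64-bit chan_masked bitmap, and masked channels are skipped
	 * when IRQs, power domains and virtual channels are set up below.
	 */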
	ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);

	if (ret > 0) {
		fsl_edma->chan_masked = chan_mask[1];
		fsl_edma->chan_masked <<= 32;
		fsl_edma->chan_masked |= chan_mask[0];
	}

	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
		char clkname[32];

		/* eDMAv3 mux registers move to the TCD area when ch_mux exists */
		if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
			break;

		fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
								      1 + i);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			return PTR_ERR(fsl_edma->muxclk[i]);
		}
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
		if (ret)
			return ret;
	}

	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
		int len;

		if (fsl_edma->chan_masked & BIT(i))
			continue;

		snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
			 dev_name(&pdev->dev), i);

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->slave_id = 0;
		fsl_chan->idle = true;
		fsl_chan->dma_dir = DMA_NONE;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;

		len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
				offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
		fsl_chan->tcd = fsl_edma->membase
				+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
		fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;

		fsl_chan->pdev = pdev;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;

	if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
		fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
		fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	}

	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
		fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);

	fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
					DMAENGINE_ALIGN_64_BYTES :
					DMAENGINE_ALIGN_32_BYTES;

	/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));

	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(np,
			drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
			fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		return ret;
	}

	/* enable round robin arbitration */
	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

static void fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
}

static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure chan is idle or will force disable. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}

static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		fsl_chan->pm_state = RUNNING;
		edma_write_tcdreg(fsl_chan, 0, csr);
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

/*
 * eDMA provides a service to other devices, so it should suspend late and
 * resume early. By the time eDMA suspends, all of its clients should have
 * stopped their DMA transfers and left their channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late = fsl_edma_suspend_late,
	.resume_early = fsl_edma_resume_early,
};

static struct platform_driver fsl_edma_driver = {
	.driver = {
		.name = "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm = &fsl_edma_pm_ops,
	},
	.probe = fsl_edma_probe,
	.remove_new = fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");