// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
//
// Refer to drivers/dma/imx-sdma.c

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/dma/mxs-dma.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of the mxs apbh-dma and apbx-dma controllers. In this mode,
 * the DMA engine can program the control registers of peripheral devices.
 */
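/*
 * Illustrative sketch only (client side, not part of this driver): a
 * peripheral driver issues such a PIO transfer by passing its register
 * words as the "sgl" pointer with DMA_TRANS_NONE; the values below are
 * made up for the example. See mxs_dma_prep_slave_sg() for how the words
 * are copied into the command's pio_words[].
 *
 *	u32 pio[2] = { ctrl0_value, compare_value };
 *
 *	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio,
 *				       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 */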

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of the NXTCMDAR register differs between DMA types and
 * versions, while the per-channel stride is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n)	(0x150 + (n) * 0x70)
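
/*
 * Worked example: on i.MX28, channel 4's NXTCMDAR register lives at
 * 0x110 + 4 * 0x70 = 0x2d0, while on the older i.MX23 APBH it lives at
 * 0x050 + 4 * 0x70 = 0x210.
 */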

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_WAIT4RDY		(1 << 5)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
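
/*
 * For example, BF_CCW(4, PIO_NUM) expands to ((4) << 12) & (0xf << 12),
 * i.e. 0x4000: the value 4 placed in the PIO_NUM field.
 */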

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
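
/*
 * Worked example, assuming 4 KiB pages: CCW_BLOCK_SIZE is 16 KiB and
 * sizeof(struct mxs_dma_ccw) is 4 + 2 + 2 + 4 + 16 * 4 = 76 bytes, so
 * NUM_CCW evaluates to 215 command words per channel.
 */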

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	unsigned int			chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
#define MXS_DMA_USE_SEMAPHORE		(1 << 1)
	bool				reset;
};
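
/*
 * A cyclic (looping) channel sets both MXS_DMA_SG_LOOP and
 * MXS_DMA_USE_SEMAPHORE; see mxs_dma_prep_dma_cyclic() and the
 * semaphore-based soft reset in mxs_dma_reset_chan().
 */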

#define MXS_DMA_CHANNELS	16
#define MXS_DMA_CHANNELS_MASK	0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
	struct platform_device		*pdev;
	unsigned int			nr_channels;
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static void mxs_dma_reset_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/*
	 * mxs dma channel resets can cause a channel stall. To recover from a
	 * channel stall, we have to reset the whole DMA engine. To avoid this,
	 * we use cyclic DMA with semaphores, which are replenished in
	 * mxs_dma_int_handler. To reset such a channel, we simply stop writing
	 * to the semaphore counter.
	 */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
	    mxs_chan->flags & MXS_DMA_SG_LOOP) {
		mxs_chan->reset = true;
	} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	} else {
		unsigned long elapsed = 0;
		const unsigned long max_wait = 50000; /* 50ms */
		void __iomem *reg_dbg1 = mxs_dma->base +
				HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

		/*
		 * On i.MX28 APBX, the DMA channel can stop working if we reset
		 * the channel while it is in the READ_FLUSH (0x08) state.
		 * Wait until the channel leaves that state, then trigger the
		 * reset. The wait is bounded at 50 ms, so it cannot stall the
		 * kernel.
		 */
		while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
			udelay(100);
			elapsed += 100;
		}

		if (elapsed >= max_wait)
			dev_err(&mxs_chan->mxs_dma->pdev->dev,
				"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
				chan_id);

		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
	}

	mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_enable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
	    mxs_chan->flags & MXS_DMA_SG_LOOP) {
		/*
		 * A cyclic DMA consists of at least 2 segments, so initialize
		 * the semaphore with 2 so we have enough time to add 1 to the
		 * semaphore if we need to.
		 */
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	} else {
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	}
	mxs_chan->reset = false;
}

static void mxs_dma_disable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_chan->status = DMA_COMPLETE;
}

static int mxs_dma_pause_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
	return 0;
}

static int mxs_dma_resume_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
	return 0;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(struct tasklet_struct *t)
{
	struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);

	dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}

static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
	int i;

	for (i = 0; i != mxs_dma->nr_channels; ++i)
		if (mxs_dma->mxs_chans[i].chan_irq == irq)
			return i;

	return -EINVAL;
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	struct mxs_dma_chan *mxs_chan;
	u32 completed;
	u32 err;
	int chan = mxs_dma_irq_to_chan(mxs_dma, irq);

	if (chan < 0)
		return IRQ_NONE;

	/* completion status */
	completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
	completed = (completed >> chan) & 0x1;

	/* Clear interrupt */
	writel((1 << chan),
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	err = readl(mxs_dma->base + HW_APBHX_CTRL2);
	err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);

	/*
	 * error status bit is in the upper 16 bits, error irq bit in the lower
	 * 16 bits. We transform it into a simpler error code:
	 * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
	 */
	err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
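
	/*
	 * Worked example for chan 0: a termination error leaves only bit 0
	 * set, so err becomes (0x1 >> 16) + (0x1 >> 0) = 0x01. A bus error
	 * sets bits 16 and 0, so err becomes 0x1 + 0x10001 = 0x10002, i.e.
	 * code 0x02 in the low bits; the leftover bit 16 is harmless because
	 * the code below only tests bit 0 and non-zeroness of err.
	 */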

	/* Clear error irq */
	writel((1 << chan),
		mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When the completion bit and the termination error bit are set at
	 * the same time, we do not treat it as an error. In other words, we
	 * only need to handle a bus error, or a termination error without a
	 * completion. Since 0x01 is the termination error code, subtracting
	 * (err & completed) removes the completed-termination case.
	 */
	err -= err & completed;

	mxs_chan = &mxs_dma->mxs_chans[chan];

	if (err) {
		dev_dbg(mxs_dma->dma_device.dev,
			"%s: error in channel %d\n", __func__,
			chan);
		mxs_chan->status = DMA_ERROR;
		mxs_dma_reset_chan(&mxs_chan->chan);
	} else if (mxs_chan->status != DMA_COMPLETE) {
		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
			mxs_chan->status = DMA_IN_PROGRESS;
			if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
				writel(1, mxs_dma->base +
					HW_APBHX_CHn_SEMA(mxs_dma, chan));
		} else {
			mxs_chan->status = DMA_COMPLETE;
		}
	}

	if (mxs_chan->status == DMA_COMPLETE) {
		if (mxs_chan->reset)
			return IRQ_HANDLED;
		dma_cookie_complete(&mxs_chan->desc);
	}

	/* schedule tasklet on this channel */
	tasklet_schedule(&mxs_chan->tasklet);

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
					   CCW_BLOCK_SIZE,
					   &mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
			  0, "mxs-dma", mxs_dma);
	if (ret)
		goto err_irq;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			  mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			  mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 *    [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [2] If there are two DMA commands in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *    [3] If there are more than two DMA commands in the DMA chain, the code
 *        should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
 *            ......
 */
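
/*
 * Illustrative sketch only (client side, not part of this driver): chaining
 * a PIO command and a data command per rule [2] above, where pio[], sgl and
 * sg_len are hypothetical client-side values:
 *
 *	dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, 3,
 *				DMA_TRANS_NONE, 0);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */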
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	u32 i, j;
	u32 *pio;
	int idx = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		idx = mxs_chan->desc_count;

	if (sg_len + idx > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the channel is already in progress, the new sg list is
	 * appended to the one prepared before.
	 */
	if (idx) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & MXS_DMA_CTRL_WAIT4END)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
		if (flags & MXS_DMA_CTRL_WAIT4RDY)
			ccw->bits |= CCW_WAIT4RDY;
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
					sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & MXS_DMA_CTRL_WAIT4END)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 num_periods = buf_len / period_len;
	u32 i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;
	mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
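
/*
 * Illustrative sketch only (client side): an audio driver could build a
 * cyclic transfer over an eight-period ring, with all sizes made up for
 * the example:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 8 * 8192, 8192,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * The completion callback then runs once per finished 8 KiB period.
 */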

static int mxs_dma_terminate_all(struct dma_chan *chan)
{
	mxs_dma_reset_chan(chan);
	mxs_dma_disable_chan(chan);

	return 0;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 residue = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS &&
	    mxs_chan->flags & MXS_DMA_SG_LOOP) {
		struct mxs_dma_ccw *last_ccw;
		u32 bar;

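		/*
		 * For a cyclic transfer, the end of the last command's buffer
		 * marks the end of the ring; subtracting the current bus
		 * address (BAR) gives the bytes not yet transferred, which
		 * is the residue reported below.
		 */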
		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

		bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
		residue -= bar;
	}

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return mxs_chan->status;
}

static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/*
	 * enable irq for all the channels; the per-channel irq enable bits
	 * occupy the upper 16 bits of CTRL1
	 */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

struct mxs_dma_filter_param {
	unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
				     ofdma->of_node);
}
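
/*
 * Binding note: the single DT cell consumed above is the channel number,
 * so a consumer node would contain something like (illustrative only, the
 * phandle and channel are made up):
 *
 *	dmas = <&dma_apbx 8>;
 *	dma-names = "rx-tx";
 */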

static int mxs_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	int ret, i;

	mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "failed to read dma-channels\n");
		return ret;
	}

	dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mxs_dma->base))
		return PTR_ERR(mxs_dma->base);

	mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk))
		return PTR_ERR(mxs_dma->clk);

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			      &mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		return ret;

	mxs_dma->pdev = pdev;
	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma gets 65280 (0xff00) bytes maximum sg size */
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
	mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
	mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
	mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;

	ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		return ret;
	}

	ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev,
			"failed to register controller\n");
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.probe = mxs_dma_probe,
};

builtin_platform_driver(mxs_dma_driver);