1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* |
3 | * Copyright 2013-2014 Freescale Semiconductor, Inc. |
4 | * Copyright 2018 Angelo Dureghello <angelo@sysam.it> |
5 | */ |
6 | #ifndef _FSL_EDMA_COMMON_H_ |
7 | #define _FSL_EDMA_COMMON_H_ |
8 | |
9 | #include <linux/dma-direction.h> |
10 | #include <linux/platform_device.h> |
11 | #include "virt-dma.h" |
12 | |
/* eDMA engine Control Register (CR) bits */
#define EDMA_CR_EDBG BIT(1)
#define EDMA_CR_ERCA BIT(2)
#define EDMA_CR_ERGA BIT(3)
#define EDMA_CR_HOE BIT(4)
#define EDMA_CR_HALT BIT(5)
#define EDMA_CR_CLM BIT(6)
#define EDMA_CR_EMLM BIT(7)
#define EDMA_CR_ECX BIT(16)
#define EDMA_CR_CX BIT(17)

/* Low-5-bit channel index written to the SEEI/CEEI/CINT/CERR registers */
#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))

/* TCD ATTR register fields: src/dst transfer size and address modulo */
#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)

/* Major-loop iteration counts use the low 15 bits of CITER/BITER */
#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)

/* TCD Control/Status (CSR) bits */
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
#define EDMA_TCD_CSR_INT_HALF BIT(2)
#define EDMA_TCD_CSR_D_REQ BIT(3)
#define EDMA_TCD_CSR_E_SG BIT(4)
#define EDMA_TCD_CSR_E_LINK BIT(5)
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
/*
 * eDMA v3 TCD NBYTES field when minor-loop offsets are in use:
 * low 10 bits hold the minor-loop byte count, the MLOFF value starts
 * at bit 10, and bits 30/31 are the offset-enable flags.
 */
#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
/* parenthesize the argument so low-precedence expressions expand safely */
#define EDMA_V3_TCD_NBYTES_MLOFF(x) ((x) << 10)
/* unsigned constants: (1 << 31) on a signed int is undefined behavior */
#define EDMA_V3_TCD_NBYTES_DMLOE (1U << 30)
#define EDMA_V3_TCD_NBYTES_SMLOE (1U << 31)
50 | |
/* DMAMUX channel configuration register (pre-v3 controllers) */
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)

/* Number of DMAMUX instances an engine may drive */
#define DMAMUX_NR 2

/* Offset of the TCD array in the legacy register map — TODO confirm */
#define EDMA_TCD 0x1000

/* Bus widths advertised to the dmaengine core */
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
			    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
			    BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
			    BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

/* eDMA v3 per-channel System Bus (SBR) and Control/Status (CSR) bits */
#define EDMA_V3_CH_SBR_RD BIT(22)
#define EDMA_V3_CH_SBR_WR BIT(21)
#define EDMA_V3_CH_CSR_ERQ BIT(0)
#define EDMA_V3_CH_CSR_EARQ BIT(1)
#define EDMA_V3_CH_CSR_EEI BIT(2)
#define EDMA_V3_CH_CSR_DONE BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
71 | |
/* Suspend/resume state tracked per channel (see fsl_edma_chan.pm_state) */
enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
76 | |
/*
 * Hardware Transfer Control Descriptor, 32-bit address variant.
 * Fields are little-endian in memory; endian conversion is handled by
 * the edma_read*/edma_write* helpers below.
 */
struct fsl_edma_hw_tcd {
	__le32 saddr;		/* source address */
	__le16 soff;		/* source address offset per transfer */
	__le16 attr;		/* SMOD/SSIZE/DMOD/DSIZE attributes */
	__le32 nbytes;		/* bytes per minor loop */
	__le32 slast;		/* source adjustment after major loop */
	__le32 daddr;		/* destination address */
	__le16 doff;		/* destination address offset per transfer */
	__le16 citer;		/* current major-loop count */
	__le32 dlast_sga;	/* dest adjustment or scatter/gather link */
	__le16 csr;		/* control/status (EDMA_TCD_CSR_*) */
	__le16 biter;		/* beginning (reload) major-loop count */
};
90 | |
/*
 * Hardware TCD, 64-bit address variant (FSL_EDMA_DRV_TCD64).
 * Note the field order differs from the 32-bit layout and the struct
 * is packed to match the hardware exactly.
 */
struct fsl_edma_hw_tcd64 {
	__le64 saddr;		/* source address */
	__le16 soff;
	__le16 attr;
	__le32 nbytes;
	__le64 slast;
	__le64 daddr;		/* destination address */
	__le64 dlast_sga;	/* dest adjustment or scatter/gather link */
	__le16 doff;
	__le16 citer;
	__le16 csr;
	__le16 biter;
} __packed;
104 | |
/*
 * eDMA v3/v4 per-channel register block: control/status registers
 * followed immediately by that channel's TCD.  Used by the *_chreg
 * accessors, which recover this struct from a channel's tcd pointer
 * via container_of().
 */
struct fsl_edma3_ch_reg {
	__le32	ch_csr;
	__le32	ch_es;
	__le32	ch_int;
	__le32	ch_sbr;
	__le32	ch_pri;
	__le32	ch_mux;
	__le32  ch_mattr; /* edma4, reserved for edma3 */
	__le32  ch_reserved;
	union {
		struct fsl_edma_hw_tcd tcd;		/* 32-bit address layout */
		struct fsl_edma_hw_tcd64 tcd64;		/* 64-bit address layout */
	};
} __packed;
119 | |
120 | /* |
121 | * These are iomem pointers, for both v32 and v64. |
122 | */ |
123 | struct edma_regs { |
124 | void __iomem *cr; |
125 | void __iomem *es; |
126 | void __iomem *erqh; |
127 | void __iomem *erql; /* aka erq on v32 */ |
128 | void __iomem *eeih; |
129 | void __iomem *eeil; /* aka eei on v32 */ |
130 | void __iomem *seei; |
131 | void __iomem *ceei; |
132 | void __iomem *serq; |
133 | void __iomem *cerq; |
134 | void __iomem *cint; |
135 | void __iomem *cerr; |
136 | void __iomem *ssrt; |
137 | void __iomem *cdne; |
138 | void __iomem *inth; |
139 | void __iomem *intl; |
140 | void __iomem *errh; |
141 | void __iomem *errl; |
142 | }; |
143 | |
/* One software-managed TCD: bus address + CPU virtual address */
struct fsl_edma_sw_tcd {
	dma_addr_t	ptcd;	/* DMA (bus) address of the TCD */
	void		*vtcd;	/* CPU virtual address of the same TCD */
};
148 | |
/* Per-channel driver state, embedding the virt-dma channel */
struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;		/* last reported dmaengine status */
	enum fsl_edma_pm_state		pm_state;
	bool				idle;
	u32				slave_id;	/* request line id — presumably pre-v3 mux; verify */
	struct fsl_edma_engine		*edma;		/* owning engine */
	struct fsl_edma_desc		*edesc;		/* descriptor currently in flight */
	struct dma_slave_config		cfg;		/* from dmaengine_slave_config() */
	u32				attr;		/* cached TCD ATTR value — TODO confirm */
	bool                            is_sw;
	struct dma_pool			*tcd_pool;	/* pool backing fsl_edma_sw_tcd entries */
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[32];
	void __iomem			*tcd;		/* this channel's HW TCD registers */
	void __iomem			*mux_addr;	/* per-channel mux register (v3+) */
	u32				real_count;
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;	/* power-domain device — presumably FSL_EDMA_DRV_HAS_PD; verify */
	u32				srcid;
	struct clk			*clk;		/* per-channel clock — presumably FSL_EDMA_DRV_HAS_CHCLK; verify */
	int                             priority;
	int                             hw_chanid;
	int                             txirq;
	bool                            is_rxchan;
	bool                            is_remote;
	bool                            is_multi_fifo;
};
180 | |
/* A transfer descriptor: one virt-dma desc plus its array of TCDs */
struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;		/* channel this desc belongs to */
	bool				iscyclic;	/* true for cyclic (audio-style) transfers */
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;		/* number of entries in tcd[] */
	struct fsl_edma_sw_tcd		tcd[];		/* flexible array of sw TCDs */
};
189 | |
/* Per-SoC capability flags carried in fsl_edma_drvdata.flags */
#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)	/* separate "dma" clock must be managed */
#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
#define FSL_EDMA_DRV_CONFIG32 BIT(2)
#define FSL_EDMA_DRV_WRAP_IO BIT(3)
#define FSL_EDMA_DRV_EDMA64 BIT(4)	/* 64-channel register layout */
#define FSL_EDMA_DRV_HAS_PD BIT(5)	/* channels sit in power domains */
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)	/* per-channel clock */
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)	/* per-channel mux register */
/* imx8 QM audio edma remote local swapped */
#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
/* Need clean CHn_CSR DONE before enable TCD's ESG */
#define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13)
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)	/* 64-bit address TCD layout (fsl_edma_hw_tcd64) */

/* Canonical flag sets for eDMA v3 and v4 IP revisions */
#define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_SG |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)

#define FSL_EDMA_DRV_EDMA4	(FSL_EDMA_DRV_SPLIT_REG |	\
				 FSL_EDMA_DRV_BUS_8BYTE |	\
				 FSL_EDMA_DRV_DEV_TO_DEV |	\
				 FSL_EDMA_DRV_ALIGN_64BYTE |	\
				 FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
223 | |
/* Static, per-SoC description selected by the platform match table */
struct fsl_edma_drvdata {
	u32			dmamuxs; /* only used before v3 */
	u32			chreg_off;	/* offset of first channel register block */
	u32			chreg_space_sz;	/* stride between channel register blocks */
	u32			flags;		/* FSL_EDMA_DRV_* capability bits */
	u32			mux_off;	/* channel mux register offset */
	u32			mux_skip;	/* how much skip for each channel */
	int			(*setup_irq)(struct platform_device *pdev,
					     struct fsl_edma_engine *fsl_edma);
};
234 | |
/* Top-level engine state; one instance per eDMA controller */
struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;	/* engine register base */
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct clk		*chclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;	/* per-SoC description */
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;	/* IP endianness, independent of CPU */
	struct edma_regs	regs;		/* pre-computed register pointers */
	u64			chan_masked;	/* bitmask of reserved channels — TODO confirm */
	struct fsl_edma_chan	chans[] __counted_by(n_chans);
};
252 | |
/*
 * Read one named TCD field, dispatching on sizeof the field in the
 * given layout: 64-bit -> edma_readq, 32-bit -> edma_readl, otherwise
 * edma_readw.  The sizeof switch is resolved at compile time.
 */
#define edma_read_tcdreg_c(chan, _tcd, __name)				\
(sizeof((_tcd)->__name) == sizeof(u64) ?				\
	edma_readq(chan->edma, &(_tcd)->__name) :			\
		((sizeof((_tcd)->__name) == sizeof(u32)) ?		\
			edma_readl(chan->edma, &(_tcd)->__name) :	\
			edma_readw(chan->edma, &(_tcd)->__name)		\
		))

/* Read a TCD field, selecting the 64- or 32-bit-address layout by flag */
#define edma_read_tcdreg(chan, __name)								\
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ?						\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) :	\
	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name)		\
)
266 | |
/*
 * Write one named TCD field with the access width matching sizeof the
 * field in the given layout (q/l/w/b).  Multi-statement, so wrapped in
 * do { } while (0).
 */
#define edma_write_tcdreg_c(chan, _tcd, _val, __name)			\
do {									\
	switch (sizeof(_tcd->__name)) {					\
	case sizeof(u64):						\
		edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name);	\
		break;							\
	case sizeof(u32):						\
		edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name);	\
		break;							\
	case sizeof(u16):						\
		edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name);	\
		break;							\
	case sizeof(u8):						\
		edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name);	\
		break;							\
	}								\
} while (0)

/* Write a TCD field, selecting the 64- or 32-bit-address layout by flag */
#define edma_write_tcdreg(chan, val, __name)							\
do {												\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;	\
												\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)					\
		edma_write_tcdreg_c(chan, tcd64_r, val, __name);				\
	else											\
		edma_write_tcdreg_c(chan, tcd_r, val, __name);					\
} while (0)
295 | |
/*
 * Copy one named field of an in-memory TCD (__tcd) into the channel's
 * hardware TCD registers, honoring the active TCD layout.
 */
#define edma_cp_tcd_to_reg(chan, __tcd, __name)							\
do {	\
	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd;	\
	struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd;			\
	struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd;			\
												\
	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)					\
		edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name);			\
	else											\
		edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name);			\
} while (0)

/*
 * Access a per-channel v3 register: chan->tcd points at the TCD inside
 * struct fsl_edma3_ch_reg, so container_of() recovers the block and the
 * named CSR/ES/... register that precedes the TCD.
 */
#define edma_readl_chreg(chan, __name)				\
	edma_readl(chan->edma,					\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						  struct fsl_edma3_ch_reg, tcd)->__name))

#define edma_writel_chreg(chan, val, __name)			\
	edma_writel(chan->edma, val,				\
		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
						  struct fsl_edma3_ch_reg, tcd)->__name))
318 | |
/* Fetch a raw (little-endian) field from an in-memory TCD of either layout */
#define fsl_edma_get_tcd(_chan, _tcd, _field)			\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
						 (((struct fsl_edma_hw_tcd *)_tcd)->_field))

/* le-to-cpu conversion sized by the field (64/32/16 bits) */
#define fsl_edma_le_to_cpu(x)						\
(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) :		\
	(sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) :	\
				    le16_to_cpu((__force __le16)(x))))

/* Fetch a TCD field and convert it to CPU endianness */
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field)				\
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?				\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) :	\
	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))

/* Store a CPU value into a TCD field as little-endian, sized by the field */
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field)				\
do {										\
	switch (sizeof((_tcd)->_field)) {					\
	case sizeof(u64):							\
		*(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val);	\
		break;								\
	case sizeof(u32):							\
		*(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val);	\
		break;								\
	case sizeof(u16):							\
		*(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val);	\
		break;								\
	}									\
} while (0)

/* Layout-dispatching wrapper for fsl_edma_set_tcd_to_le_c() */
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field)	\
do {								\
	if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64)	\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field);	\
	else							\
		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field);		\
} while (0)
355 | |
356 | /* |
357 | * R/W functions for big- or little-endian registers: |
358 | * The eDMA controller's endian is independent of the CPU core's endian. |
359 | * For the big-endian IP module, the offset for 8-bit or 16-bit registers |
360 | * should also be swapped opposite to that in little-endian IP. |
361 | */ |
362 | static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr) |
363 | { |
364 | u64 l, h; |
365 | |
366 | if (edma->big_endian) { |
367 | l = ioread32be(addr); |
368 | h = ioread32be(addr + 4); |
369 | } else { |
370 | l = ioread32(addr); |
371 | h = ioread32(addr + 4); |
372 | } |
373 | |
374 | return (h << 32) | l; |
375 | } |
376 | |
377 | static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) |
378 | { |
379 | if (edma->big_endian) |
380 | return ioread32be(addr); |
381 | else |
382 | return ioread32(addr); |
383 | } |
384 | |
385 | static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) |
386 | { |
387 | if (edma->big_endian) |
388 | return ioread16be(addr); |
389 | else |
390 | return ioread16(addr); |
391 | } |
392 | |
393 | static inline void edma_writeb(struct fsl_edma_engine *edma, |
394 | u8 val, void __iomem *addr) |
395 | { |
396 | /* swap the reg offset for these in big-endian mode */ |
397 | if (edma->big_endian) |
398 | iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); |
399 | else |
400 | iowrite8(val, addr); |
401 | } |
402 | |
403 | static inline void edma_writew(struct fsl_edma_engine *edma, |
404 | u16 val, void __iomem *addr) |
405 | { |
406 | /* swap the reg offset for these in big-endian mode */ |
407 | if (edma->big_endian) |
408 | iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); |
409 | else |
410 | iowrite16(val, addr); |
411 | } |
412 | |
413 | static inline void edma_writel(struct fsl_edma_engine *edma, |
414 | u32 val, void __iomem *addr) |
415 | { |
416 | if (edma->big_endian) |
417 | iowrite32be(val, addr); |
418 | else |
419 | iowrite32(val, addr); |
420 | } |
421 | |
422 | static inline void edma_writeq(struct fsl_edma_engine *edma, |
423 | u64 val, void __iomem *addr) |
424 | { |
425 | if (edma->big_endian) { |
426 | iowrite32be(val & 0xFFFFFFFF, addr); |
427 | iowrite32be(val >> 32, addr + 4); |
428 | } else { |
429 | iowrite32(val & 0xFFFFFFFF, addr); |
430 | iowrite32(val >> 32, addr + 4); |
431 | } |
432 | } |
433 | |
/* Convert a dmaengine channel to its enclosing fsl_edma_chan */
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
438 | |
/* FSL_EDMA_DRV_* capability flags of the engine owning this channel */
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
	return fsl_chan->edma->drvdata->flags;
}
443 | |
/* Convert a virt-dma descriptor to its enclosing fsl_edma_desc */
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
448 | |
/* Mark a channel failed and idle; called from the error interrupt path */
static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	fsl_chan->status = DMA_ERROR;
	fsl_chan->idle = true;
}
454 | |
/*
 * Shared channel/descriptor operations implemented once in the common
 * code and used by the platform-specific eDMA drivers.
 */
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
			unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
/* dmaengine device_* callbacks */
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
		struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
		size_t len, unsigned long flags);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
/* probe/remove helpers */
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
484 | |
485 | #endif /* _FSL_EDMA_COMMON_H_ */ |
486 | |