// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with independent configuration support for multiple
 * channels.
 */

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_VSIZE_MASK			GENMASK(12, 0)
#define XILINX_DMA_REG_HSIZE			0x0004
#define XILINX_DMA_HSIZE_MASK			GENMASK(15, 0)

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_DELAY_MAX		GENMASK(31, 24)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_CR_DELAY_SHIFT	24
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_BD_COMP_MASK		BIT(31)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		512
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
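
/*
 * For example, xilinx_prep_dma_addr_t(hw->src_addr) expands by token
 * pasting to:
 *
 *	(dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))
 *
 * i.e. it folds the "_msb" companion word of a split 64-bit register
 * pair back into a single dma_addr_t value.
 */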

/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET		0x0500
#define XILINX_MCDMA_CHEN_OFFSET		0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET		0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET		0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET		0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x)		(0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x)		(0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)	(0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)	(0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Shifts */
#define XILINX_MCDMA_COALESCE_SHIFT		16
#define XILINX_MCDMA_COALESCE_MAX		24
#define XILINX_MCDMA_IRQ_ALL_MASK		GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK		GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK		BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK		BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK		BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK		BIT(7)
#define XILINX_MCDMA_BD_EOP			BIT(30)
#define XILINX_MCDMA_BD_SOP			BIT(31)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved field @0x10
 * @control: Control Information field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate between the DMA IPs' transfer start
 * @stop_transfer: Differentiate between the DMA IPs' quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 * @irq_delay: Interrupt delay timeout
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	bool terminating;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
	u8 irq_delay;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
	const int max_channels;
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 * @has_axistream_connected: AXI DMA connected to AXI Stream IP
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
	bool has_axistream_connected;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
				  val, cond, delay_us, timeout_us)
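
/*
 * Usage sketch for xilinx_dma_poll_timeout() (this is how
 * xilinx_dma_stop_transfer() below uses it): poll the channel's DMASR
 * register until the condition holds or the timeout (in microseconds)
 * expires:
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *					  val & XILINX_DMA_DMASR_HALTED, 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */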

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the vdma driver may write to a register offset that is not a
 * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
 * 32-bit writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

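/*
 * On channels synthesized with more than 32 address bits (chan->ext_addr),
 * the helpers below split the buffer address across the buf_addr and
 * buf_addr_msb descriptor words; otherwise only the low 32 bits are
 * programmed.
 */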
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}

/**
 * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
 * @tx: async transaction descriptor
 * @payload_len: metadata payload length
 * @max_len: metadata max length
 * Return: The app field pointer.
 */
static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
					 size_t *payload_len, size_t *max_len)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_axidma_tx_segment *seg;

	*max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
	seg = list_first_entry(&desc->segments,
			       struct xilinx_axidma_tx_segment, node);
	return seg->hw.app;
}

static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
	.get_ptr = xilinx_dma_get_metadata_ptr,
};
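
/*
 * Clients reach the APP words above through the core dmaengine metadata
 * API. A minimal consumer-side sketch (illustrative only; "tx" is a
 * descriptor prepared on this channel and MY_APP_WORD is a hypothetical
 * sideband value):
 *
 *	size_t payload_len, max_len;
 *	u32 *app = dmaengine_desc_get_metadata_ptr(tx, &payload_len,
 *						   &max_len);
 *	if (!IS_ERR(app))
 *		app[0] = MY_APP_WORD;
 */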

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_aximcdma_tx_segment *
xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_aximcdma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

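/*
 * The AXI DMA/MCDMA buffer descriptors are pre-linked into a ring at
 * channel allocation time (see xilinx_dma_alloc_chan_resources()). When
 * recycling a descriptor, only its payload words are cleared; the
 * next-descriptor words are preserved so the hardware chain stays intact.
 */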
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *segment)
{
	xilinx_mcdma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
	struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	} else {
		list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
					 &desc->segments, node) {
			list_del(&aximcdma_segment->node);
			xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
				  XILINX_DMA_NUM_DESCS, chan->seg_mv,
				  chan->seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
	    chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
				  struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_cdma_tx_segment *cdma_seg;
	struct xilinx_axidma_tx_segment *axidma_seg;
	struct xilinx_aximcdma_tx_segment *aximcdma_seg;
	struct xilinx_cdma_desc_hw *cdma_hw;
	struct xilinx_axidma_desc_hw *axidma_hw;
	struct xilinx_aximcdma_desc_hw *aximcdma_hw;
	struct list_head *entry;
	u32 residue = 0;

	list_for_each(entry, &desc->segments) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
			cdma_seg = list_entry(entry,
					      struct xilinx_cdma_tx_segment,
					      node);
			cdma_hw = &cdma_seg->hw;
			residue += (cdma_hw->control - cdma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else if (chan->xdev->dma_config->dmatype ==
			   XDMA_TYPE_AXIDMA) {
			axidma_seg = list_entry(entry,
						struct xilinx_axidma_tx_segment,
						node);
			axidma_hw = &axidma_seg->hw;
			residue += (axidma_hw->control - axidma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else {
			aximcdma_seg =
				list_entry(entry,
					   struct xilinx_aximcdma_tx_segment,
					   node);
			aximcdma_hw = &aximcdma_seg->hw;
			residue +=
				(aximcdma_hw->control - aximcdma_hw->status) &
				chan->xdev->max_buffer_len;
		}
	}

	return residue;
}
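
/*
 * Note on the residue computation above: in the SG hardware descriptors,
 * the low bits of "control" hold the programmed transfer length while the
 * low bits of "status" report how many bytes the hardware actually
 * transferred, so (control - status) masked with max_buffer_len gives the
 * bytes still outstanding per segment. (Descriptive note; see the IP
 * product guides for the exact field layouts.)
 */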

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(&desc->async_tx, &cb);
	if (dmaengine_desc_callback_valid(&cb)) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_result result;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		if (unlikely(desc->err)) {
			if (chan->direction == DMA_DEV_TO_MEM)
				result.result = DMA_TRANS_READ_FAILED;
			else
				result.result = DMA_TRANS_WRITE_FAILED;
		} else {
			result.result = DMA_TRANS_NOERROR;
		}

		result.residue = desc->residue;

		/* Run the link descriptor callback function */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
		spin_lock_irqsave(&chan->lock, flags);

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);

		/*
		 * While we ran a callback the user may have called a
		 * terminate function, which takes care of cleaning up any
		 * remaining descriptors.
		 */
		if (chan->terminating)
			break;
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
{
	struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		/*
		 * For cyclic DMA mode we need to program the tail descriptor
		 * register with a value which is not part of the BD chain,
		 * so allocate a descriptor segment during channel allocation
		 * for programming the tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
				lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
				upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_mv = dma_alloc_coherent(chan->dev,
						  sizeof(*chan->seg_mv) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_mv) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_mv[i].hw.next_desc =
				lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_mv[i].hw.next_desc_msb =
				upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
					      ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_mv[i].phys = chan->seg_p +
				sizeof(*chan->seg_mv) * i;
			list_add_tail(&chan->seg_mv[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_cdma_tx_segment),
						  __alignof__(struct xilinx_cdma_tx_segment),
						  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
						  chan->dev,
						  sizeof(struct xilinx_vdma_tx_segment),
						  __alignof__(struct xilinx_vdma_tx_segment),
						  0);
	}

	if (!chan->desc_pool &&
	    ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
	     chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the
		 * other channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
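
/*
 * Worked example for xilinx_dma_calc_copysize() (numbers purely
 * illustrative): with max_buffer_len = 100, copy_align = 3 (i.e. 8-byte
 * alignment) and 200 bytes left to copy, the chunk is first clipped to
 * 100 and then, since more data remains, rounded down to 96 so that the
 * next segment starts on an aligned boundary. The final chunk is never
 * rounded down.
 */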
1260 | |
1261 | /** |
1262 | * xilinx_dma_tx_status - Get DMA transaction status |
1263 | * @dchan: DMA channel |
1264 | * @cookie: Transaction identifier |
1265 | * @txstate: Transaction state |
1266 | * |
1267 | * Return: DMA transaction status |
1268 | */ |
1269 | static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, |
1270 | dma_cookie_t cookie, |
1271 | struct dma_tx_state *txstate) |
1272 | { |
1273 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1274 | struct xilinx_dma_tx_descriptor *desc; |
1275 | enum dma_status ret; |
1276 | unsigned long flags; |
1277 | u32 residue = 0; |
1278 | |
1279 | ret = dma_cookie_status(chan: dchan, cookie, state: txstate); |
1280 | if (ret == DMA_COMPLETE || !txstate) |
1281 | return ret; |
1282 | |
1283 | spin_lock_irqsave(&chan->lock, flags); |
1284 | if (!list_empty(head: &chan->active_list)) { |
1285 | desc = list_last_entry(&chan->active_list, |
1286 | struct xilinx_dma_tx_descriptor, node); |
1287 | /* |
1288 | * VDMA and simple mode do not support residue reporting, so the |
1289 | * residue field will always be 0. |
1290 | */ |
1291 | if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) |
1292 | residue = xilinx_dma_get_residue(chan, desc); |
1293 | } |
1294 | spin_unlock_irqrestore(lock: &chan->lock, flags); |
1295 | |
1296 | dma_set_residue(state: txstate, residue); |
1297 | |
1298 | return ret; |
1299 | } |
1300 | |
1301 | /** |
1302 | * xilinx_dma_stop_transfer - Halt DMA channel |
1303 | * @chan: Driver specific DMA channel |
1304 | * |
1305 | * Return: '0' on success and failure value on error |
1306 | */ |
1307 | static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) |
1308 | { |
1309 | u32 val; |
1310 | |
1311 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
1312 | |
1313 | /* Wait for the hardware to halt */ |
1314 | return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
1315 | val & XILINX_DMA_DMASR_HALTED, 0, |
1316 | XILINX_DMA_LOOP_COUNT); |
1317 | } |
1318 | |
1319 | /** |
1320 | * xilinx_cdma_stop_transfer - Wait for the current transfer to complete |
1321 | * @chan: Driver specific DMA channel |
1322 | * |
1323 | * Return: '0' on success and failure value on error |
1324 | */ |
1325 | static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) |
1326 | { |
1327 | u32 val; |
1328 | |
1329 | return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
1330 | val & XILINX_DMA_DMASR_IDLE, 0, |
1331 | XILINX_DMA_LOOP_COUNT); |
1332 | } |
1333 | |
1334 | /** |
1335 | * xilinx_dma_start - Start DMA channel |
1336 | * @chan: Driver specific DMA channel |
1337 | */ |
1338 | static void xilinx_dma_start(struct xilinx_dma_chan *chan) |
1339 | { |
1340 | int err; |
1341 | u32 val; |
1342 | |
1343 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
1344 | |
1345 | /* Wait for the hardware to start */ |
1346 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
1347 | !(val & XILINX_DMA_DMASR_HALTED), 0, |
1348 | XILINX_DMA_LOOP_COUNT); |
1349 | |
1350 | if (err) { |
1351 | dev_err(chan->dev, "Cannot start channel %p: %x\n", |
1352 | chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
1353 | |
1354 | chan->err = true; |
1355 | } |
1356 | } |
1357 | |
1358 | /** |
1359 | * xilinx_vdma_start_transfer - Starts VDMA transfer |
1360 | * @chan: Driver specific channel struct pointer |
1361 | */ |
1362 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) |
1363 | { |
1364 | struct xilinx_vdma_config *config = &chan->config; |
1365 | struct xilinx_dma_tx_descriptor *desc; |
1366 | u32 reg, j; |
1367 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
1368 | int i = 0; |
1369 | |
1370 | /* This function was invoked with lock held */ |
1371 | if (chan->err) |
1372 | return; |
1373 | |
1374 | if (!chan->idle) |
1375 | return; |
1376 | |
1377 | if (list_empty(head: &chan->pending_list)) |
1378 | return; |
1379 | |
1380 | desc = list_first_entry(&chan->pending_list, |
1381 | struct xilinx_dma_tx_descriptor, node); |
1382 | |
1383 | /* Configure the hardware using info in the config structure */ |
1384 | if (chan->has_vflip) { |
1385 | reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); |
1386 | reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; |
1387 | reg |= config->vflip_en; |
1388 | dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, |
1389 | value: reg); |
1390 | } |
1391 | |
1392 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
1393 | |
1394 | if (config->frm_cnt_en) |
1395 | reg |= XILINX_DMA_DMACR_FRAMECNT_EN; |
1396 | else |
1397 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
1398 | |
1399 | /* If not parking, enable circular mode */ |
1400 | if (config->park) |
1401 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
1402 | else |
1403 | reg |= XILINX_DMA_DMACR_CIRC_EN; |
1404 | |
1405 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, value: reg); |
1406 | |
1407 | j = chan->desc_submitcount; |
1408 | reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); |
1409 | if (chan->direction == DMA_MEM_TO_DEV) { |
1410 | reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; |
1411 | reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT; |
1412 | } else { |
1413 | reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; |
1414 | reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; |
1415 | } |
1416 | dma_write(chan, XILINX_DMA_REG_PARK_PTR, value: reg); |
1417 | |
1418 | /* Start the hardware */ |
1419 | xilinx_dma_start(chan); |
1420 | |
1421 | if (chan->err) |
1422 | return; |
1423 | |
1424 | /* Start the transfer */ |
1425 | if (chan->desc_submitcount < chan->num_frms) |
1426 | i = chan->desc_submitcount; |
1427 | |
1428 | list_for_each_entry(segment, &desc->segments, node) { |
1429 | if (chan->ext_addr) |
1430 | vdma_desc_write_64(chan, |
1431 | XILINX_VDMA_REG_START_ADDRESS_64(i++), |
1432 | value_lsb: segment->hw.buf_addr, |
1433 | value_msb: segment->hw.buf_addr_msb); |
1434 | else |
1435 | vdma_desc_write(chan, |
1436 | XILINX_VDMA_REG_START_ADDRESS(i++), |
1437 | value: segment->hw.buf_addr); |
1438 | |
1439 | last = segment; |
1440 | } |
1441 | |
1442 | if (!last) |
1443 | return; |
1444 | |
1445 | /* HW expects these parameters to be same for one transaction */ |
1446 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, value: last->hw.hsize); |
1447 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
1448 | value: last->hw.stride); |
1449 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, value: last->hw.vsize); |
1450 | |
1451 | chan->desc_submitcount++; |
1452 | chan->desc_pendingcount--; |
1453 | list_move_tail(list: &desc->node, head: &chan->active_list); |
1454 | if (chan->desc_submitcount == chan->num_frms) |
1455 | chan->desc_submitcount = 0; |
1456 | |
1457 | chan->idle = false; |
1458 | } |
1459 | |
1460 | /** |
1461 | * xilinx_cdma_start_transfer - Starts cdma transfer |
1462 | * @chan: Driver specific channel struct pointer |
1463 | */ |
1464 | static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) |
1465 | { |
1466 | struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; |
1467 | struct xilinx_cdma_tx_segment *tail_segment; |
1468 | u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); |
1469 | |
1470 | if (chan->err) |
1471 | return; |
1472 | |
1473 | if (!chan->idle) |
1474 | return; |
1475 | |
1476 | if (list_empty(head: &chan->pending_list)) |
1477 | return; |
1478 | |
1479 | head_desc = list_first_entry(&chan->pending_list, |
1480 | struct xilinx_dma_tx_descriptor, node); |
1481 | tail_desc = list_last_entry(&chan->pending_list, |
1482 | struct xilinx_dma_tx_descriptor, node); |
1483 | tail_segment = list_last_entry(&tail_desc->segments, |
1484 | struct xilinx_cdma_tx_segment, node); |
1485 | |
1486 | if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { |
1487 | ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; |
1488 | ctrl_reg |= chan->desc_pendingcount << |
1489 | XILINX_DMA_CR_COALESCE_SHIFT; |
1490 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, value: ctrl_reg); |
1491 | } |
1492 | |
1493 | if (chan->has_sg) { |
1494 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, |
1495 | XILINX_CDMA_CR_SGMODE); |
1496 | |
1497 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, |
1498 | XILINX_CDMA_CR_SGMODE); |
1499 | |
1500 | xilinx_write(chan, XILINX_DMA_REG_CURDESC, |
1501 | addr: head_desc->async_tx.phys); |
1502 | |
1503 | /* Update tail ptr register which will start the transfer */ |
1504 | xilinx_write(chan, XILINX_DMA_REG_TAILDESC, |
1505 | addr: tail_segment->phys); |
1506 | } else { |
1507 | /* In simple mode */ |
1508 | struct xilinx_cdma_tx_segment *segment; |
1509 | struct xilinx_cdma_desc_hw *hw; |
1510 | |
1511 | segment = list_first_entry(&head_desc->segments, |
1512 | struct xilinx_cdma_tx_segment, |
1513 | node); |
1514 | |
1515 | hw = &segment->hw; |
1516 | |
1517 | xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, |
1518 | xilinx_prep_dma_addr_t(hw->src_addr)); |
1519 | xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, |
1520 | xilinx_prep_dma_addr_t(hw->dest_addr)); |
1521 | |
1522 | /* Start the transfer */ |
1523 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1524 | value: hw->control & chan->xdev->max_buffer_len); |
1525 | } |
1526 | |
1527 | list_splice_tail_init(list: &chan->pending_list, head: &chan->active_list); |
1528 | chan->desc_pendingcount = 0; |
1529 | chan->idle = false; |
1530 | } |
1531 | |
1532 | /** |
1533 | * xilinx_dma_start_transfer - Starts DMA transfer |
1534 | * @chan: Driver specific channel struct pointer |
1535 | */ |
1536 | static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) |
1537 | { |
1538 | struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; |
1539 | struct xilinx_axidma_tx_segment *tail_segment; |
1540 | u32 reg; |
1541 | |
1542 | if (chan->err) |
1543 | return; |
1544 | |
1545 | if (list_empty(head: &chan->pending_list)) |
1546 | return; |
1547 | |
1548 | if (!chan->idle) |
1549 | return; |
1550 | |
1551 | head_desc = list_first_entry(&chan->pending_list, |
1552 | struct xilinx_dma_tx_descriptor, node); |
1553 | tail_desc = list_last_entry(&chan->pending_list, |
1554 | struct xilinx_dma_tx_descriptor, node); |
1555 | tail_segment = list_last_entry(&tail_desc->segments, |
1556 | struct xilinx_axidma_tx_segment, node); |
1557 | |
1558 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
1559 | |
1560 | if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { |
1561 | reg &= ~XILINX_DMA_CR_COALESCE_MAX; |
1562 | reg |= chan->desc_pendingcount << |
1563 | XILINX_DMA_CR_COALESCE_SHIFT; |
1564 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
1565 | } |
1566 | |
1567 | if (chan->has_sg) |
1568 | xilinx_write(chan, XILINX_DMA_REG_CURDESC, |
1569 | head_desc->async_tx.phys); |
1570 | reg &= ~XILINX_DMA_CR_DELAY_MAX; |
1571 | reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT; |
1572 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
1573 | |
1574 | xilinx_dma_start(chan); |
1575 | |
1576 | if (chan->err) |
1577 | return; |
1578 | |
1579 | /* Start the transfer */ |
1580 | if (chan->has_sg) { |
1581 | if (chan->cyclic) |
1582 | xilinx_write(chan, XILINX_DMA_REG_TAILDESC, |
1583 | chan->cyclic_seg_v->phys); |
1584 | else |
1585 | xilinx_write(chan, XILINX_DMA_REG_TAILDESC, |
1586 | tail_segment->phys); |
1587 | } else { |
1588 | struct xilinx_axidma_tx_segment *segment; |
1589 | struct xilinx_axidma_desc_hw *hw; |
1590 | |
1591 | segment = list_first_entry(&head_desc->segments, |
1592 | struct xilinx_axidma_tx_segment, |
1593 | node); |
1594 | hw = &segment->hw; |
1595 | |
1596 | xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, |
1597 | xilinx_prep_dma_addr_t(hw->buf_addr)); |
1598 | |
1599 | /* Start the transfer */ |
1600 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1601 | hw->control & chan->xdev->max_buffer_len); |
1602 | } |
1603 | |
1604 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
1605 | chan->desc_pendingcount = 0; |
1606 | chan->idle = false; |
1607 | } |
1608 | |
1609 | /** |
1610 | * xilinx_mcdma_start_transfer - Starts MCDMA transfer |
1611 | * @chan: Driver specific channel struct pointer |
1612 | */ |
1613 | static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) |
1614 | { |
1615 | struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; |
1616 | struct xilinx_aximcdma_tx_segment *tail_segment; |
1617 | u32 reg; |
1618 | |
1619 | /* |
1620 | * The lock has been taken by the calling function, so we don't |
1621 | * need to take it again here. |
1622 | */ |
1623 | |
1624 | if (chan->err) |
1625 | return; |
1626 | |
1627 | if (!chan->idle) |
1628 | return; |
1629 | |
1630 | if (list_empty(&chan->pending_list)) |
1631 | return; |
1632 | |
1633 | head_desc = list_first_entry(&chan->pending_list, |
1634 | struct xilinx_dma_tx_descriptor, node); |
1635 | tail_desc = list_last_entry(&chan->pending_list, |
1636 | struct xilinx_dma_tx_descriptor, node); |
1637 | tail_segment = list_last_entry(&tail_desc->segments, |
1638 | struct xilinx_aximcdma_tx_segment, node); |
1639 | |
1640 | reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); |
1641 | |
1642 | if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { |
1643 | reg &= ~XILINX_MCDMA_COALESCE_MASK; |
1644 | reg |= chan->desc_pendingcount << |
1645 | XILINX_MCDMA_COALESCE_SHIFT; |
1646 | } |
1647 | |
1648 | reg |= XILINX_MCDMA_IRQ_ALL_MASK; |
1649 | dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); |
1650 | |
1651 | /* Program current descriptor */ |
1652 | xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), |
1653 | head_desc->async_tx.phys); |
1654 | |
1655 | /* Program channel enable register */ |
1656 | reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); |
1657 | reg |= BIT(chan->tdest); |
1658 | dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); |
1659 | |
1660 | /* Start the fetch of BDs for the channel */ |
1661 | reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); |
1662 | reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; |
1663 | dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); |
1664 | |
1665 | xilinx_dma_start(chan); |
1666 | |
1667 | if (chan->err) |
1668 | return; |
1669 | |
1670 | /* Start the transfer */ |
1671 | xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), |
1672 | tail_segment->phys); |
1673 | |
1674 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
1675 | chan->desc_pendingcount = 0; |
1676 | chan->idle = false; |
1677 | } |
1678 | |
1679 | /** |
1680 | * xilinx_dma_issue_pending - Issue pending transactions |
1681 | * @dchan: DMA channel |
1682 | */ |
1683 | static void xilinx_dma_issue_pending(struct dma_chan *dchan) |
1684 | { |
1685 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1686 | unsigned long flags; |
1687 | |
1688 | spin_lock_irqsave(&chan->lock, flags); |
1689 | chan->start_transfer(chan); |
1690 | spin_unlock_irqrestore(&chan->lock, flags); |
1691 | } |
1692 | |
1693 | /** |
1694 | * xilinx_dma_device_config - Configure the DMA channel |
1695 | * @dchan: DMA channel |
1696 | * @config: channel configuration |
1697 | * |
1698 | * Return: 0 always. |
1699 | */ |
1700 | static int xilinx_dma_device_config(struct dma_chan *dchan, |
1701 | struct dma_slave_config *config) |
1702 | { |
1703 | return 0; |
1704 | } |
1705 | |
1706 | /** |
1707 | * xilinx_dma_complete_descriptor - Mark the active descriptor as complete |
1708 | * @chan : xilinx DMA channel |
1709 | * |
1710 | * CONTEXT: hardirq |
1711 | */ |
1712 | static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) |
1713 | { |
1714 | struct xilinx_dma_tx_descriptor *desc, *next; |
1715 | |
1716 | /* This function is invoked with the lock held */ |
1717 | if (list_empty(&chan->active_list)) |
1718 | return; |
1719 | |
1720 | list_for_each_entry_safe(desc, next, &chan->active_list, node) { |
1721 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
1722 | struct xilinx_axidma_tx_segment *seg; |
1723 | |
1724 | seg = list_last_entry(&desc->segments, |
1725 | struct xilinx_axidma_tx_segment, node); |
1726 | if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg) |
1727 | break; |
1728 | } |
1729 | if (chan->has_sg && chan->xdev->dma_config->dmatype != |
1730 | XDMA_TYPE_VDMA) |
1731 | desc->residue = xilinx_dma_get_residue(chan, desc); |
1732 | else |
1733 | desc->residue = 0; |
1734 | desc->err = chan->err; |
1735 | |
1736 | list_del(&desc->node); |
1737 | if (!desc->cyclic) |
1738 | dma_cookie_complete(&desc->async_tx); |
1739 | list_add_tail(&desc->node, &chan->done_list); |
1740 | } |
1741 | } |
1742 | |
1743 | /** |
1744 | * xilinx_dma_reset - Reset DMA channel |
1745 | * @chan: Driver specific DMA channel |
1746 | * |
1747 | * Return: '0' on success and failure value on error |
1748 | */ |
1749 | static int xilinx_dma_reset(struct xilinx_dma_chan *chan) |
1750 | { |
1751 | int err; |
1752 | u32 tmp; |
1753 | |
1754 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); |
1755 | |
1756 | /* Wait for the hardware to finish reset */ |
1757 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, |
1758 | !(tmp & XILINX_DMA_DMACR_RESET), 0, |
1759 | XILINX_DMA_LOOP_COUNT); |
1760 | |
1761 | if (err) { |
1762 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", |
1763 | dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), |
1764 | dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
1765 | return -ETIMEDOUT; |
1766 | } |
1767 | |
1768 | chan->err = false; |
1769 | chan->idle = true; |
1770 | chan->desc_pendingcount = 0; |
1771 | chan->desc_submitcount = 0; |
1772 | |
1773 | return err; |
1774 | } |
1775 | |
1776 | /** |
1777 | * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts |
1778 | * @chan: Driver specific DMA channel |
1779 | * |
1780 | * Return: '0' on success and failure value on error |
1781 | */ |
1782 | static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) |
1783 | { |
1784 | int err; |
1785 | |
1786 | /* Reset VDMA */ |
1787 | err = xilinx_dma_reset(chan); |
1788 | if (err) |
1789 | return err; |
1790 | |
1791 | /* Enable interrupts */ |
1792 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, |
1793 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
1794 | |
1795 | return 0; |
1796 | } |
1797 | |
1798 | /** |
1799 | * xilinx_mcdma_irq_handler - MCDMA Interrupt handler |
1800 | * @irq: IRQ number |
1801 | * @data: Pointer to the Xilinx MCDMA channel structure |
1802 | * |
1803 | * Return: IRQ_HANDLED/IRQ_NONE |
1804 | */ |
1805 | static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) |
1806 | { |
1807 | struct xilinx_dma_chan *chan = data; |
1808 | u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; |
1809 | |
1810 | if (chan->direction == DMA_DEV_TO_MEM) |
1811 | ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; |
1812 | else |
1813 | ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; |
1814 | |
1815 | /* Read the channel ID that raised the interrupt */ |
1816 | chan_sermask = dma_ctrl_read(chan, ser_offset); |
1817 | chan_id = ffs(chan_sermask); |
1818 | |
1819 | if (!chan_id) |
1820 | return IRQ_NONE; |
1821 | |
1822 | if (chan->direction == DMA_DEV_TO_MEM) |
1823 | chan_offset = chan->xdev->dma_config->max_channels / 2; |
1824 | |
1825 | chan_offset = chan_offset + (chan_id - 1); |
1826 | chan = chan->xdev->chan[chan_offset]; |
1827 | /* Read the status and ack the interrupts. */ |
1828 | status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); |
1829 | if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) |
1830 | return IRQ_NONE; |
1831 | |
1832 | dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), |
1833 | status & XILINX_MCDMA_IRQ_ALL_MASK); |
1834 | |
1835 | if (status & XILINX_MCDMA_IRQ_ERR_MASK) { |
1836 | dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", |
1837 | chan, |
1838 | dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), |
1839 | dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET |
1840 | (chan->tdest)), |
1841 | dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET |
1842 | (chan->tdest))); |
1843 | chan->err = true; |
1844 | } |
1845 | |
1846 | if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { |
1847 | /* |
1848 | * The device is taking too long to transfer data when the |
1849 | * user requires responsiveness. |
1850 | */ |
1851 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); |
1852 | } |
1853 | |
1854 | if (status & XILINX_MCDMA_IRQ_IOC_MASK) { |
1855 | spin_lock(&chan->lock); |
1856 | xilinx_dma_complete_descriptor(chan); |
1857 | chan->idle = true; |
1858 | chan->start_transfer(chan); |
1859 | spin_unlock(&chan->lock); |
1860 | } |
1861 | |
1862 | tasklet_hi_schedule(&chan->tasklet); |
1863 | return IRQ_HANDLED; |
1864 | } |
1865 | |
1866 | /** |
1867 | * xilinx_dma_irq_handler - DMA Interrupt handler |
1868 | * @irq: IRQ number |
1869 | * @data: Pointer to the Xilinx DMA channel structure |
1870 | * |
1871 | * Return: IRQ_HANDLED/IRQ_NONE |
1872 | */ |
1873 | static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) |
1874 | { |
1875 | struct xilinx_dma_chan *chan = data; |
1876 | u32 status; |
1877 | |
1878 | /* Read the status and ack the interrupts. */ |
1879 | status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); |
1880 | if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) |
1881 | return IRQ_NONE; |
1882 | |
1883 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
1884 | status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
1885 | |
1886 | if (status & XILINX_DMA_DMASR_ERR_IRQ) { |
1887 | /* |
1888 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the |
1889 | * error is recoverable, ignore it. Otherwise flag the error. |
1890 | * |
1891 | * Only recoverable errors can be cleared in the DMASR register, |
1892 | * so make sure not to write 1 to any other error bits. |
1893 | */ |
1894 | u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; |
1895 | |
1896 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
1897 | errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); |
1898 | |
1899 | if (!chan->flush_on_fsync || |
1900 | (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { |
1901 | dev_err(chan->dev, |
1902 | "Channel %p has errors %x, cdr %x tdr %x\n", |
1903 | chan, errors, |
1904 | dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), |
1905 | dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); |
1906 | chan->err = true; |
1907 | } |
1908 | } |
1909 | |
1910 | if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ | |
1911 | XILINX_DMA_DMASR_DLY_CNT_IRQ)) { |
1912 | spin_lock(&chan->lock); |
1913 | xilinx_dma_complete_descriptor(chan); |
1914 | chan->idle = true; |
1915 | chan->start_transfer(chan); |
1916 | spin_unlock(&chan->lock); |
1917 | } |
1918 | |
1919 | tasklet_schedule(&chan->tasklet); |
1920 | return IRQ_HANDLED; |
1921 | } |
1922 | |
1923 | /** |
1924 | * append_desc_queue - Queue a descriptor |
1925 | * @chan: Driver specific dma channel |
1926 | * @desc: dma transaction descriptor |
1927 | */ |
1928 | static void append_desc_queue(struct xilinx_dma_chan *chan, |
1929 | struct xilinx_dma_tx_descriptor *desc) |
1930 | { |
1931 | struct xilinx_vdma_tx_segment *tail_segment; |
1932 | struct xilinx_dma_tx_descriptor *tail_desc; |
1933 | struct xilinx_axidma_tx_segment *axidma_tail_segment; |
1934 | struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment; |
1935 | struct xilinx_cdma_tx_segment *cdma_tail_segment; |
1936 | |
1937 | if (list_empty(&chan->pending_list)) |
1938 | goto append; |
1939 | |
1940 | /* |
1941 | * Add the hardware descriptor to the chain of hardware descriptors |
1942 | * that already exists in memory. |
1943 | */ |
1944 | tail_desc = list_last_entry(&chan->pending_list, |
1945 | struct xilinx_dma_tx_descriptor, node); |
1946 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
1947 | tail_segment = list_last_entry(&tail_desc->segments, |
1948 | struct xilinx_vdma_tx_segment, |
1949 | node); |
1950 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
1951 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
1952 | cdma_tail_segment = list_last_entry(&tail_desc->segments, |
1953 | struct xilinx_cdma_tx_segment, |
1954 | node); |
1955 | cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
1956 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
1957 | axidma_tail_segment = list_last_entry(&tail_desc->segments, |
1958 | struct xilinx_axidma_tx_segment, |
1959 | node); |
1960 | axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
1961 | } else { |
1962 | aximcdma_tail_segment = |
1963 | list_last_entry(&tail_desc->segments, |
1964 | struct xilinx_aximcdma_tx_segment, |
1965 | node); |
1966 | aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
1967 | } |
1968 | |
1969 | /* |
1970 | * Add the software descriptor and all children to the list |
1971 | * of pending transactions |
1972 | */ |
1973 | append: |
1974 | list_add_tail(&desc->node, &chan->pending_list); |
1975 | chan->desc_pendingcount++; |
1976 | |
1977 | if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) |
1978 | && unlikely(chan->desc_pendingcount > chan->num_frms)) { |
1979 | dev_dbg(chan->dev, "desc pendingcount is too high\n"); |
1980 | chan->desc_pendingcount = chan->num_frms; |
1981 | } |
1982 | } |
1983 | |
1984 | /** |
1985 | * xilinx_dma_tx_submit - Submit DMA transaction |
1986 | * @tx: Async transaction descriptor |
1987 | * |
1988 | * Return: cookie value on success and failure value on error |
1989 | */ |
1990 | static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
1991 | { |
1992 | struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); |
1993 | struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); |
1994 | dma_cookie_t cookie; |
1995 | unsigned long flags; |
1996 | int err; |
1997 | |
1998 | if (chan->cyclic) { |
1999 | xilinx_dma_free_tx_descriptor(chan, desc); |
2000 | return -EBUSY; |
2001 | } |
2002 | |
2003 | if (chan->err) { |
2004 | /* |
2005 | * If reset fails, the system needs a hard reset; the |
2006 | * channel is no longer functional. |
2007 | */ |
2008 | err = xilinx_dma_chan_reset(chan); |
2009 | if (err < 0) |
2010 | return err; |
2011 | } |
2012 | |
2013 | spin_lock_irqsave(&chan->lock, flags); |
2014 | |
2015 | cookie = dma_cookie_assign(tx); |
2016 | |
2017 | /* Put this transaction onto the tail of the pending queue */ |
2018 | append_desc_queue(chan, desc); |
2019 | |
2020 | if (desc->cyclic) |
2021 | chan->cyclic = true; |
2022 | |
2023 | chan->terminating = false; |
2024 | |
2025 | spin_unlock_irqrestore(&chan->lock, flags); |
2026 | |
2027 | return cookie; |
2028 | } |
2029 | |
2030 | /** |
2031 | * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a |
2032 | * DMA_SLAVE transaction |
2033 | * @dchan: DMA channel |
2034 | * @xt: Interleaved template pointer |
2035 | * @flags: transfer ack flags |
2036 | * |
2037 | * Return: Async transaction descriptor on success and NULL on failure |
2038 | */ |
2039 | static struct dma_async_tx_descriptor * |
2040 | xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, |
2041 | struct dma_interleaved_template *xt, |
2042 | unsigned long flags) |
2043 | { |
2044 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2045 | struct xilinx_dma_tx_descriptor *desc; |
2046 | struct xilinx_vdma_tx_segment *segment; |
2047 | struct xilinx_vdma_desc_hw *hw; |
2048 | |
2049 | if (!is_slave_direction(xt->dir)) |
2050 | return NULL; |
2051 | |
2052 | if (!xt->numf || !xt->sgl[0].size) |
2053 | return NULL; |
2054 | |
2055 | if (xt->numf & ~XILINX_DMA_VSIZE_MASK || |
2056 | xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK) |
2057 | return NULL; |
2058 | |
2059 | if (xt->frame_size != 1) |
2060 | return NULL; |
2061 | |
2062 | /* Allocate a transaction descriptor. */ |
2063 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
2064 | if (!desc) |
2065 | return NULL; |
2066 | |
2067 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
2068 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
2069 | async_tx_ack(&desc->async_tx); |
2070 | |
2071 | /* Allocate the link descriptor from DMA pool */ |
2072 | segment = xilinx_vdma_alloc_tx_segment(chan); |
2073 | if (!segment) |
2074 | goto error; |
2075 | |
2076 | /* Fill in the hardware descriptor */ |
2077 | hw = &segment->hw; |
2078 | hw->vsize = xt->numf; |
2079 | hw->hsize = xt->sgl[0].size; |
2080 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << |
2081 | XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; |
2082 | hw->stride |= chan->config.frm_dly << |
2083 | XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; |
2084 | |
2085 | if (xt->dir != DMA_MEM_TO_DEV) { |
2086 | if (chan->ext_addr) { |
2087 | hw->buf_addr = lower_32_bits(xt->dst_start); |
2088 | hw->buf_addr_msb = upper_32_bits(xt->dst_start); |
2089 | } else { |
2090 | hw->buf_addr = xt->dst_start; |
2091 | } |
2092 | } else { |
2093 | if (chan->ext_addr) { |
2094 | hw->buf_addr = lower_32_bits(xt->src_start); |
2095 | hw->buf_addr_msb = upper_32_bits(xt->src_start); |
2096 | } else { |
2097 | hw->buf_addr = xt->src_start; |
2098 | } |
2099 | } |
2100 | |
2101 | /* Insert the segment into the descriptor segments list. */ |
2102 | list_add_tail(&segment->node, &desc->segments); |
2103 | |
2104 | /* Point the async_tx descriptor at the first hardware segment. */ |
2105 | segment = list_first_entry(&desc->segments, |
2106 | struct xilinx_vdma_tx_segment, node); |
2107 | desc->async_tx.phys = segment->phys; |
2108 | |
2109 | return &desc->async_tx; |
2110 | |
2111 | error: |
2112 | xilinx_dma_free_tx_descriptor(chan, desc); |
2113 | return NULL; |
2114 | } |
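| |
| /* |
|  * Illustrative sketch (not part of the driver): a DMA_DEV_TO_MEM video |
|  * client could describe one frame per descriptor roughly as below. The |
|  * names "chan", "buf", "height", "width_bytes" and "stride" are |
|  * assumptions for the example, not driver symbols: |
|  * |
|  *	struct dma_interleaved_template *xt; |
|  *	struct dma_async_tx_descriptor *tx; |
|  * |
|  *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL); |
|  *	xt->dir = DMA_DEV_TO_MEM; |
|  *	xt->dst_start = buf;			// DMA address of the frame |
|  *	xt->numf = height;			// VSIZE: lines per frame |
|  *	xt->frame_size = 1;			// exactly one chunk per line |
|  *	xt->sgl[0].size = width_bytes;		// HSIZE: bytes per line |
|  *	xt->sgl[0].icg = stride - width_bytes;	// inter-line gap |
|  *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT); |
|  */ |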
2115 | |
2116 | /** |
2117 | * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction |
2118 | * @dchan: DMA channel |
2119 | * @dma_dst: destination address |
2120 | * @dma_src: source address |
2121 | * @len: transfer length |
2122 | * @flags: transfer ack flags |
2123 | * |
2124 | * Return: Async transaction descriptor on success and NULL on failure |
2125 | */ |
2126 | static struct dma_async_tx_descriptor * |
2127 | xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, |
2128 | dma_addr_t dma_src, size_t len, unsigned long flags) |
2129 | { |
2130 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2131 | struct xilinx_dma_tx_descriptor *desc; |
2132 | struct xilinx_cdma_tx_segment *segment; |
2133 | struct xilinx_cdma_desc_hw *hw; |
2134 | |
2135 | if (!len || len > chan->xdev->max_buffer_len) |
2136 | return NULL; |
2137 | |
2138 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
2139 | if (!desc) |
2140 | return NULL; |
2141 | |
2142 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
2143 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
2144 | |
2145 | /* Allocate the link descriptor from DMA pool */ |
2146 | segment = xilinx_cdma_alloc_tx_segment(chan); |
2147 | if (!segment) |
2148 | goto error; |
2149 | |
2150 | hw = &segment->hw; |
2151 | hw->control = len; |
2152 | hw->src_addr = dma_src; |
2153 | hw->dest_addr = dma_dst; |
2154 | if (chan->ext_addr) { |
2155 | hw->src_addr_msb = upper_32_bits(dma_src); |
2156 | hw->dest_addr_msb = upper_32_bits(dma_dst); |
2157 | } |
2158 | |
2159 | /* Insert the segment into the descriptor segments list. */ |
2160 | list_add_tail(&segment->node, &desc->segments); |
2161 | |
2162 | desc->async_tx.phys = segment->phys; |
2163 | hw->next_desc = segment->phys; |
2164 | |
2165 | return &desc->async_tx; |
2166 | |
2167 | error: |
2168 | xilinx_dma_free_tx_descriptor(chan, desc); |
2169 | return NULL; |
2170 | } |
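| |
| /* |
|  * Illustrative sketch (not part of the driver): a CDMA memcpy client, |
|  * assuming DMA-mapped "dst"/"src" addresses and a "len" in bytes: |
|  * |
|  *	dma_cap_mask_t mask; |
|  *	struct dma_chan *chan; |
|  *	struct dma_async_tx_descriptor *tx; |
|  *	dma_cookie_t cookie; |
|  * |
|  *	dma_cap_zero(mask); |
|  *	dma_cap_set(DMA_MEMCPY, mask); |
|  *	chan = dma_request_chan_by_mask(&mask); |
|  *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, |
|  *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
|  *	cookie = dmaengine_submit(tx); |
|  *	dma_async_issue_pending(chan); |
|  *	// e.g. wait synchronously: dma_sync_wait(chan, cookie); |
|  */ |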
2171 | |
2172 | /** |
2173 | * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
2174 | * @dchan: DMA channel |
2175 | * @sgl: scatterlist to transfer to/from |
2176 | * @sg_len: number of entries in @sgl |
2177 | * @direction: DMA direction |
2178 | * @flags: transfer ack flags |
2179 | * @context: APP words of the descriptor |
2180 | * |
2181 | * Return: Async transaction descriptor on success and NULL on failure |
2182 | */ |
2183 | static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( |
2184 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
2185 | enum dma_transfer_direction direction, unsigned long flags, |
2186 | void *context) |
2187 | { |
2188 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2189 | struct xilinx_dma_tx_descriptor *desc; |
2190 | struct xilinx_axidma_tx_segment *segment = NULL; |
2191 | u32 *app_w = (u32 *)context; |
2192 | struct scatterlist *sg; |
2193 | size_t copy; |
2194 | size_t sg_used; |
2195 | unsigned int i; |
2196 | |
2197 | if (!is_slave_direction(direction)) |
2198 | return NULL; |
2199 | |
2200 | /* Allocate a transaction descriptor. */ |
2201 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
2202 | if (!desc) |
2203 | return NULL; |
2204 | |
2205 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
2206 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
2207 | |
2208 | /* Build transactions using information in the scatter gather list */ |
2209 | for_each_sg(sgl, sg, sg_len, i) { |
2210 | sg_used = 0; |
2211 | |
2212 | /* Loop until the entire scatterlist entry is used */ |
2213 | while (sg_used < sg_dma_len(sg)) { |
2214 | struct xilinx_axidma_desc_hw *hw; |
2215 | |
2216 | /* Get a free segment */ |
2217 | segment = xilinx_axidma_alloc_tx_segment(chan); |
2218 | if (!segment) |
2219 | goto error; |
2220 | |
2221 | /* |
2222 | * Calculate the maximum number of bytes to transfer, |
2223 | * making sure it is less than the hw limit |
2224 | */ |
2225 | copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), |
2226 | sg_used); |
2227 | hw = &segment->hw; |
2228 | |
2229 | /* Fill in the descriptor */ |
2230 | xilinx_axidma_buf(chan, hw, sg_dma_address(sg), |
2231 | sg_used, 0); |
2232 | |
2233 | hw->control = copy; |
2234 | |
2235 | if (chan->direction == DMA_MEM_TO_DEV) { |
2236 | if (app_w) |
2237 | memcpy(hw->app, app_w, sizeof(u32) * |
2238 | XILINX_DMA_NUM_APP_WORDS); |
2239 | } |
2240 | |
2241 | sg_used += copy; |
2242 | |
2243 | /* |
2244 | * Insert the segment into the descriptor segments |
2245 | * list. |
2246 | */ |
2247 | list_add_tail(&segment->node, &desc->segments); |
2248 | } |
2249 | } |
2250 | |
2251 | segment = list_first_entry(&desc->segments, |
2252 | struct xilinx_axidma_tx_segment, node); |
2253 | desc->async_tx.phys = segment->phys; |
2254 | |
2255 | /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */ |
2256 | if (chan->direction == DMA_MEM_TO_DEV) { |
2257 | segment->hw.control |= XILINX_DMA_BD_SOP; |
2258 | segment = list_last_entry(&desc->segments, |
2259 | struct xilinx_axidma_tx_segment, |
2260 | node); |
2261 | segment->hw.control |= XILINX_DMA_BD_EOP; |
2262 | } |
2263 | |
2264 | if (chan->xdev->has_axistream_connected) |
2265 | desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops; |
2266 | |
2267 | return &desc->async_tx; |
2268 | |
2269 | error: |
2270 | xilinx_dma_free_tx_descriptor(chan, desc); |
2271 | return NULL; |
2272 | } |
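| |
| /* |
|  * Illustrative sketch (not part of the driver): a slave client queueing |
|  * an already DMA-mapped scatterlist for transmit; "chan", "sgl", |
|  * "sg_len" and "my_done_cb" are assumptions for the example: |
|  * |
|  *	struct dma_async_tx_descriptor *tx; |
|  * |
|  *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, |
|  *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
|  *	if (!tx) |
|  *		return -ENOMEM; |
|  *	tx->callback = my_done_cb;	// hypothetical completion callback |
|  *	dmaengine_submit(tx); |
|  *	dma_async_issue_pending(chan); |
|  */ |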
2273 | |
2274 | /** |
2275 | * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction |
2276 | * @dchan: DMA channel |
2277 | * @buf_addr: Physical address of the buffer |
2278 | * @buf_len: Total length of the cyclic buffers |
2279 | * @period_len: length of individual cyclic buffer |
2280 | * @direction: DMA direction |
2281 | * @flags: transfer ack flags |
2282 | * |
2283 | * Return: Async transaction descriptor on success and NULL on failure |
2284 | */ |
2285 | static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( |
2286 | struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, |
2287 | size_t period_len, enum dma_transfer_direction direction, |
2288 | unsigned long flags) |
2289 | { |
2290 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2291 | struct xilinx_dma_tx_descriptor *desc; |
2292 | struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; |
2293 | size_t copy, sg_used; |
2294 | unsigned int num_periods; |
2295 | int i; |
2296 | u32 reg; |
2297 | |
2298 | if (!period_len) |
2299 | return NULL; |
2300 | |
2301 | num_periods = buf_len / period_len; |
2302 | |
2303 | if (!num_periods) |
2304 | return NULL; |
2305 | |
2306 | if (!is_slave_direction(direction)) |
2307 | return NULL; |
2308 | |
2309 | /* Allocate a transaction descriptor. */ |
2310 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
2311 | if (!desc) |
2312 | return NULL; |
2313 | |
2314 | chan->direction = direction; |
2315 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
2316 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
2317 | |
2318 | for (i = 0; i < num_periods; ++i) { |
2319 | sg_used = 0; |
2320 | |
2321 | while (sg_used < period_len) { |
2322 | struct xilinx_axidma_desc_hw *hw; |
2323 | |
2324 | /* Get a free segment */ |
2325 | segment = xilinx_axidma_alloc_tx_segment(chan); |
2326 | if (!segment) |
2327 | goto error; |
2328 | |
2329 | /* |
2330 | * Calculate the maximum number of bytes to transfer, |
2331 | * making sure it is less than the hw limit |
2332 | */ |
2333 | copy = xilinx_dma_calc_copysize(chan, period_len, |
2334 | sg_used); |
2335 | hw = &segment->hw; |
2336 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, |
2337 | period_len * i); |
2338 | hw->control = copy; |
2339 | |
2340 | if (prev) |
2341 | prev->hw.next_desc = segment->phys; |
2342 | |
2343 | prev = segment; |
2344 | sg_used += copy; |
2345 | |
2346 | /* |
2347 | * Insert the segment into the descriptor segments |
2348 | * list. |
2349 | */ |
2350 | list_add_tail(&segment->node, &desc->segments); |
2351 | } |
2352 | } |
2353 | |
2354 | head_segment = list_first_entry(&desc->segments, |
2355 | struct xilinx_axidma_tx_segment, node); |
2356 | desc->async_tx.phys = head_segment->phys; |
2357 | |
2358 | desc->cyclic = true; |
2359 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
2360 | reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; |
2361 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
2362 | |
2363 | segment = list_last_entry(&desc->segments, |
2364 | struct xilinx_axidma_tx_segment, |
2365 | node); |
2366 | segment->hw.next_desc = (u32) head_segment->phys; |
2367 | |
2368 | /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */ |
2369 | if (direction == DMA_MEM_TO_DEV) { |
2370 | head_segment->hw.control |= XILINX_DMA_BD_SOP; |
2371 | segment->hw.control |= XILINX_DMA_BD_EOP; |
2372 | } |
2373 | |
2374 | return &desc->async_tx; |
2375 | |
2376 | error: |
2377 | xilinx_dma_free_tx_descriptor(chan, desc); |
2378 | return NULL; |
2379 | } |
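| |
| /* |
|  * Illustrative sketch (not part of the driver): audio-style cyclic RX |
|  * over a ring buffer; "chan", "buf", "buf_len", "period_len" and |
|  * "my_period_cb" are assumptions for the example: |
|  * |
|  *	struct dma_async_tx_descriptor *tx; |
|  * |
|  *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len, |
|  *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); |
|  *	tx->callback = my_period_cb;	// hypothetical, runs once per period |
|  *	dmaengine_submit(tx); |
|  *	dma_async_issue_pending(chan); |
|  *	// later: dmaengine_terminate_sync(chan); |
|  */ |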
2380 | |
2381 | /** |
2382 | * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
2383 | * @dchan: DMA channel |
2384 | * @sgl: scatterlist to transfer to/from |
2385 | * @sg_len: number of entries in @sgl |
2386 | * @direction: DMA direction |
2387 | * @flags: transfer ack flags |
2388 | * @context: APP words of the descriptor |
2389 | * |
2390 | * Return: Async transaction descriptor on success and NULL on failure |
2391 | */ |
2392 | static struct dma_async_tx_descriptor * |
2393 | xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, |
2394 | unsigned int sg_len, |
2395 | enum dma_transfer_direction direction, |
2396 | unsigned long flags, void *context) |
2397 | { |
2398 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2399 | struct xilinx_dma_tx_descriptor *desc; |
2400 | struct xilinx_aximcdma_tx_segment *segment = NULL; |
2401 | u32 *app_w = (u32 *)context; |
2402 | struct scatterlist *sg; |
2403 | size_t copy; |
2404 | size_t sg_used; |
2405 | unsigned int i; |
2406 | |
2407 | if (!is_slave_direction(direction)) |
2408 | return NULL; |
2409 | |
2410 | /* Allocate a transaction descriptor. */ |
2411 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
2412 | if (!desc) |
2413 | return NULL; |
2414 | |
2415 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
2416 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
2417 | |
2418 | /* Build transactions using information in the scatter gather list */ |
2419 | for_each_sg(sgl, sg, sg_len, i) { |
2420 | sg_used = 0; |
2421 | |
2422 | /* Loop until the entire scatterlist entry is used */ |
2423 | while (sg_used < sg_dma_len(sg)) { |
2424 | struct xilinx_aximcdma_desc_hw *hw; |
2425 | |
2426 | /* Get a free segment */ |
2427 | segment = xilinx_aximcdma_alloc_tx_segment(chan); |
2428 | if (!segment) |
2429 | goto error; |
2430 | |
2431 | /* |
2432 | * Calculate the maximum number of bytes to transfer, |
2433 | * making sure it is less than the hw limit |
2434 | */ |
2435 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, |
2436 | chan->xdev->max_buffer_len); |
2437 | hw = &segment->hw; |
2438 | |
2439 | /* Fill in the descriptor */ |
2440 | xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), |
2441 | sg_used); |
2442 | hw->control = copy; |
2443 | |
2444 | if (chan->direction == DMA_MEM_TO_DEV && app_w) { |
2445 | memcpy(hw->app, app_w, sizeof(u32) * |
2446 | XILINX_DMA_NUM_APP_WORDS); |
2447 | } |
2448 | |
2449 | sg_used += copy; |
2450 | /* |
2451 | * Insert the segment into the descriptor segments |
2452 | * list. |
2453 | */ |
2454 | list_add_tail(&segment->node, &desc->segments); |
2455 | } |
2456 | } |
2457 | |
2458 | segment = list_first_entry(&desc->segments, |
2459 | struct xilinx_aximcdma_tx_segment, node); |
2460 | desc->async_tx.phys = segment->phys; |
2461 | |
2462 | /* For DMA_MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */ |
2463 | if (chan->direction == DMA_MEM_TO_DEV) { |
2464 | segment->hw.control |= XILINX_MCDMA_BD_SOP; |
2465 | segment = list_last_entry(&desc->segments, |
2466 | struct xilinx_aximcdma_tx_segment, |
2467 | node); |
2468 | segment->hw.control |= XILINX_MCDMA_BD_EOP; |
2469 | } |
2470 | |
2471 | return &desc->async_tx; |
2472 | |
2473 | error: |
2474 | xilinx_dma_free_tx_descriptor(chan, desc); |
2475 | |
2476 | return NULL; |
2477 | } |
2478 | |
2479 | /** |
2480 | * xilinx_dma_terminate_all - Halt the channel and free descriptors |
2481 | * @dchan: Driver specific DMA Channel pointer |
2482 | * |
2483 | * Return: '0' always. |
2484 | */ |
2485 | static int xilinx_dma_terminate_all(struct dma_chan *dchan) |
2486 | { |
2487 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2488 | u32 reg; |
2489 | int err; |
2490 | |
2491 | if (!chan->cyclic) { |
2492 | err = chan->stop_transfer(chan); |
2493 | if (err) { |
2494 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
2495 | chan, dma_ctrl_read(chan, |
2496 | XILINX_DMA_REG_DMASR)); |
2497 | chan->err = true; |
2498 | } |
2499 | } |
2500 | |
2501 | xilinx_dma_chan_reset(chan); |
2502 | /* Remove and free all of the descriptors in the lists */ |
2503 | chan->terminating = true; |
2504 | xilinx_dma_free_descriptors(chan); |
2505 | chan->idle = true; |
2506 | |
2507 | if (chan->cyclic) { |
2508 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
2509 | reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; |
2510 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
2511 | chan->cyclic = false; |
2512 | } |
2513 | |
2514 | if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) |
2515 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, |
2516 | XILINX_CDMA_CR_SGMODE); |
2517 | |
2518 | return 0; |
2519 | } |
2520 | |
2521 | static void xilinx_dma_synchronize(struct dma_chan *dchan) |
2522 | { |
2523 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2524 | |
2525 | tasklet_kill(&chan->tasklet); |
2526 | } |
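| |
| /* |
|  * Illustrative sketch (not part of the driver): the async teardown |
|  * sequence this callback supports, as seen from a client: |
|  * |
|  *	dmaengine_terminate_async(chan); |
|  *	dmaengine_synchronize(chan);	// waits out the completion tasklet |
|  */ |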
2527 | |
2528 | /** |
2529 | * xilinx_vdma_channel_set_config - Configure VDMA channel |
2530 | * Run-time configuration for AXI VDMA; supports: |
2531 | * . halt the channel |
2532 | * . configure interrupt coalescing and inter-packet delay threshold |
2533 | * . start/stop parking |
2534 | * . enable genlock |
2535 | * |
2536 | * @dchan: DMA channel |
2537 | * @cfg: VDMA device configuration pointer |
2538 | * |
2539 | * Return: '0' on success and failure value on error |
2540 | */ |
2541 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, |
2542 | struct xilinx_vdma_config *cfg) |
2543 | { |
2544 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
2545 | u32 dmacr; |
2546 | |
2547 | if (cfg->reset) |
2548 | return xilinx_dma_chan_reset(chan); |
2549 | |
2550 | dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
2551 | |
2552 | chan->config.frm_dly = cfg->frm_dly; |
2553 | chan->config.park = cfg->park; |
2554 | |
2555 | /* genlock settings */ |
2556 | chan->config.gen_lock = cfg->gen_lock; |
2557 | chan->config.master = cfg->master; |
2558 | |
2559 | dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN; |
2560 | if (cfg->gen_lock && chan->genlock) { |
2561 | dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; |
2562 | dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK; |
2563 | dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; |
2564 | } |
2565 | |
2566 | chan->config.frm_cnt_en = cfg->frm_cnt_en; |
2567 | chan->config.vflip_en = cfg->vflip_en; |
2568 | |
2569 | if (cfg->park) |
2570 | chan->config.park_frm = cfg->park_frm; |
2571 | else |
2572 | chan->config.park_frm = -1; |
2573 | |
2574 | chan->config.coalesc = cfg->coalesc; |
2575 | chan->config.delay = cfg->delay; |
2576 | |
2577 | if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { |
2578 | dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK; |
2579 | dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; |
2580 | chan->config.coalesc = cfg->coalesc; |
2581 | } |
2582 | |
2583 | if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { |
2584 | dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK; |
2585 | dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; |
2586 | chan->config.delay = cfg->delay; |
2587 | } |
2588 | |
2589 | /* FSync Source selection */ |
2590 | dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; |
2591 | dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; |
2592 | |
2593 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); |
2594 | |
2595 | return 0; |
2596 | } |
2597 | EXPORT_SYMBOL(xilinx_vdma_channel_set_config); |
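| |
| /* |
|  * Illustrative sketch (not part of the driver): a VDMA client enabling |
|  * genlock and per-frame interrupts; "chan" is assumed to be a VDMA |
|  * channel obtained through the DMA engine API: |
|  * |
|  *	struct xilinx_vdma_config cfg = { |
|  *		.gen_lock = 1, |
|  *		.master = 0, |
|  *		.frm_cnt_en = 1, |
|  *		.coalesc = 1,		// interrupt after every frame |
|  *	}; |
|  * |
|  *	xilinx_vdma_channel_set_config(chan, &cfg); |
|  */ |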
2598 | |
2599 | /* ----------------------------------------------------------------------------- |
2600 | * Probe and remove |
2601 | */ |
2602 | |
2603 | /** |
2604 | * xilinx_dma_chan_remove - Per Channel remove function |
2605 | * @chan: Driver specific DMA channel |
2606 | */ |
2607 | static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) |
2608 | { |
2609 | /* Disable all interrupts */ |
2610 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, |
2611 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
2612 | |
2613 | if (chan->irq > 0) |
2614 | free_irq(chan->irq, chan); |
2615 | |
2616 | tasklet_kill(&chan->tasklet); |
2617 | |
2618 | list_del(&chan->common.device_node); |
2619 | } |
2620 | |
2621 | static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, |
2622 | struct clk **tx_clk, struct clk **rx_clk, |
2623 | struct clk **sg_clk, struct clk **tmp_clk) |
2624 | { |
2625 | int err; |
2626 | |
2627 | *tmp_clk = NULL; |
2628 | |
2629 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2630 | if (IS_ERR(*axi_clk)) |
2631 | return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
2632 | |
2633 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); |
2634 | if (IS_ERR(*tx_clk)) |
2635 | *tx_clk = NULL; |
2636 | |
2637 | *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); |
2638 | if (IS_ERR(*rx_clk)) |
2639 | *rx_clk = NULL; |
2640 | |
2641 | *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); |
2642 | if (IS_ERR(*sg_clk)) |
2643 | *sg_clk = NULL; |
2644 | |
2645 | err = clk_prepare_enable(*axi_clk); |
2646 | if (err) { |
2647 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); |
2648 | return err; |
2649 | } |
2650 | |
2651 | err = clk_prepare_enable(*tx_clk); |
2652 | if (err) { |
2653 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
2654 | goto err_disable_axiclk; |
2655 | } |
2656 | |
2657 | err = clk_prepare_enable(*rx_clk); |
2658 | if (err) { |
2659 | dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); |
2660 | goto err_disable_txclk; |
2661 | } |
2662 | |
2663 | err = clk_prepare_enable(*sg_clk); |
2664 | if (err) { |
2665 | dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); |
2666 | goto err_disable_rxclk; |
2667 | } |
2668 | |
2669 | return 0; |
2670 | |
2671 | err_disable_rxclk: |
2672 | clk_disable_unprepare(*rx_clk); |
2673 | err_disable_txclk: |
2674 | clk_disable_unprepare(*tx_clk); |
2675 | err_disable_axiclk: |
2676 | clk_disable_unprepare(*axi_clk); |
2677 | |
2678 | return err; |
2679 | } |
2680 | |
2681 | static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, |
2682 | struct clk **dev_clk, struct clk **tmp_clk, |
2683 | struct clk **tmp1_clk, struct clk **tmp2_clk) |
2684 | { |
2685 | int err; |
2686 | |
2687 | *tmp_clk = NULL; |
2688 | *tmp1_clk = NULL; |
2689 | *tmp2_clk = NULL; |
2690 | |
2691 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2692 | if (IS_ERR(*axi_clk)) |
2693 | return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
2694 | |
2695 | *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); |
2696 | if (IS_ERR(*dev_clk)) |
2697 | return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n"); |
2698 | |
2699 | err = clk_prepare_enable(*axi_clk); |
2700 | if (err) { |
2701 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); |
2702 | return err; |
2703 | } |
2704 | |
2705 | err = clk_prepare_enable(*dev_clk); |
2706 | if (err) { |
2707 | dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); |
2708 | goto err_disable_axiclk; |
2709 | } |
2710 | |
2711 | return 0; |
2712 | |
2713 | err_disable_axiclk: |
2714 | clk_disable_unprepare(*axi_clk); |
2715 | |
2716 | return err; |
2717 | } |
2718 | |
2719 | static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, |
2720 | struct clk **tx_clk, struct clk **txs_clk, |
2721 | struct clk **rx_clk, struct clk **rxs_clk) |
2722 | { |
2723 | int err; |
2724 | |
2725 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
2726 | if (IS_ERR(*axi_clk)) |
2727 | return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
2728 | |
2729 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); |
2730 | if (IS_ERR(*tx_clk)) |
2731 | *tx_clk = NULL; |
2732 | |
2733 | *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); |
2734 | if (IS_ERR(*txs_clk)) |
2735 | *txs_clk = NULL; |
2736 | |
2737 | *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); |
2738 | if (IS_ERR(*rx_clk)) |
2739 | *rx_clk = NULL; |
2740 | |
2741 | *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); |
2742 | if (IS_ERR(*rxs_clk)) |
2743 | *rxs_clk = NULL; |
2744 | |
2745 | err = clk_prepare_enable(*axi_clk); |
2746 | if (err) { |
2747 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", |
2748 | err); |
2749 | return err; |
2750 | } |
2751 | |
2752 | err = clk_prepare_enable(*tx_clk); |
2753 | if (err) { |
2754 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
2755 | goto err_disable_axiclk; |
2756 | } |
2757 | |
2758 | err = clk_prepare_enable(*txs_clk); |
2759 | if (err) { |
2760 | dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); |
2761 | goto err_disable_txclk; |
2762 | } |
2763 | |
2764 | err = clk_prepare_enable(*rx_clk); |
2765 | if (err) { |
2766 | dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); |
2767 | goto err_disable_txsclk; |
2768 | } |
2769 | |
2770 | err = clk_prepare_enable(*rxs_clk); |
2771 | if (err) { |
2772 | dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); |
2773 | goto err_disable_rxclk; |
2774 | } |
2775 | |
2776 | return 0; |
2777 | |
2778 | err_disable_rxclk: |
2779 | clk_disable_unprepare(*rx_clk); |
2780 | err_disable_txsclk: |
2781 | clk_disable_unprepare(*txs_clk); |
2782 | err_disable_txclk: |
2783 | clk_disable_unprepare(*tx_clk); |
2784 | err_disable_axiclk: |
2785 | clk_disable_unprepare(*axi_clk); |
2786 | |
2787 | return err; |
2788 | } |
2789 | |
2790 | static void xdma_disable_allclks(struct xilinx_dma_device *xdev) |
2791 | { |
2792 | clk_disable_unprepare(xdev->rxs_clk); |
2793 | clk_disable_unprepare(xdev->rx_clk); |
2794 | clk_disable_unprepare(xdev->txs_clk); |
2795 | clk_disable_unprepare(xdev->tx_clk); |
2796 | clk_disable_unprepare(xdev->axi_clk); |
2797 | } |
2798 | |
2799 | /** |
2800 | * xilinx_dma_chan_probe - Per Channel Probing |
2801 | * It gets the channel features from the device tree entry and |
2802 | * initializes the special channel handling routines |
2803 | * |
2804 | * @xdev: Driver specific device structure |
2805 | * @node: Device node |
2806 | * |
2807 | * Return: '0' on success and failure value on error |
2808 | */ |
2809 | static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
2810 | struct device_node *node) |
2811 | { |
2812 | struct xilinx_dma_chan *chan; |
2813 | bool has_dre = false; |
2814 | u32 value, width; |
2815 | int err; |
2816 | |
2817 | /* Allocate and initialize the channel structure */ |
2818 | chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); |
2819 | if (!chan) |
2820 | return -ENOMEM; |
2821 | |
2822 | chan->dev = xdev->dev; |
2823 | chan->xdev = xdev; |
2824 | chan->desc_pendingcount = 0x0; |
2825 | chan->ext_addr = xdev->ext_addr; |
2826 | /* This variable ensures that descriptors are not |
2827 | * submitted when the DMA engine is in progress. It is |
2828 | * added to avoid polling for a bit in the status register |
2829 | * to know the DMA state in the driver hot path. |
2830 | */ |
2831 | chan->idle = true; |
2832 | |
2833 | spin_lock_init(&chan->lock); |
2834 | INIT_LIST_HEAD(&chan->pending_list); |
2835 | INIT_LIST_HEAD(&chan->done_list); |
2836 | INIT_LIST_HEAD(&chan->active_list); |
2837 | INIT_LIST_HEAD(&chan->free_seg_list); |
2838 | |
2839 | /* Retrieve the channel properties from the device tree */ |
2840 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); |
2841 | |
2842 | of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay); |
2843 | |
2844 | chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); |
2845 | |
2846 | err = of_property_read_u32(node, "xlnx,datawidth", &value); |
2847 | if (err) { |
2848 | dev_err(xdev->dev, "missing xlnx,datawidth property\n"); |
2849 | return err; |
2850 | } |
2851 | width = value >> 3; /* Convert bits to bytes */ |
2852 | |
2853 | /* If data width is greater than 8 bytes, DRE is not in hw */ |
2854 | if (width > 8) |
2855 | has_dre = false; |
2856 | |
2857 | if (!has_dre) |
2858 | xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); |
2859 | |
2860 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || |
2861 | of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || |
2862 | of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { |
2863 | chan->direction = DMA_MEM_TO_DEV; |
2864 | chan->id = xdev->mm2s_chan_id++; |
2865 | chan->tdest = chan->id; |
2866 | |
2867 | chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; |
2868 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
2869 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; |
2870 | chan->config.park = 1; |
2871 | |
2872 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
2873 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) |
2874 | chan->flush_on_fsync = true; |
2875 | } |
2876 | } else if (of_device_is_compatible(node, |
2877 | "xlnx,axi-vdma-s2mm-channel") || |
2878 | of_device_is_compatible(node, |
2879 | "xlnx,axi-dma-s2mm-channel")) { |
2880 | chan->direction = DMA_DEV_TO_MEM; |
2881 | chan->id = xdev->s2mm_chan_id++; |
2882 | chan->tdest = chan->id - xdev->dma_config->max_channels / 2; |
2883 | chan->has_vflip = of_property_read_bool(node, |
2884 | "xlnx,enable-vert-flip"); |
2885 | if (chan->has_vflip) { |
2886 | chan->config.vflip_en = dma_read(chan, |
2887 | XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & |
2888 | XILINX_VDMA_ENABLE_VERTICAL_FLIP; |
2889 | } |
2890 | |
2891 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) |
2892 | chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; |
2893 | else |
2894 | chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
2895 | |
2896 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
2897 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; |
2898 | chan->config.park = 1; |
2899 | |
2900 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
2901 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) |
2902 | chan->flush_on_fsync = true; |
2903 | } |
2904 | } else { |
2905 | dev_err(xdev->dev, "Invalid channel compatible node\n"); |
2906 | return -EINVAL; |
2907 | } |
2908 | |
2909 | /* Request the interrupt */ |
2910 | chan->irq = of_irq_get(node, chan->tdest); |
2911 | if (chan->irq < 0) |
2912 | return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n"); |
2913 | err = request_irq(chan->irq, xdev->dma_config->irq_handler, |
2914 | IRQF_SHARED, "xilinx-dma-controller", chan); |
2915 | if (err) { |
2916 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); |
2917 | return err; |
2918 | } |
2919 | |
2920 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
2921 | chan->start_transfer = xilinx_dma_start_transfer; |
2922 | chan->stop_transfer = xilinx_dma_stop_transfer; |
2923 | } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
2924 | chan->start_transfer = xilinx_mcdma_start_transfer; |
2925 | chan->stop_transfer = xilinx_dma_stop_transfer; |
2926 | } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
2927 | chan->start_transfer = xilinx_cdma_start_transfer; |
2928 | chan->stop_transfer = xilinx_cdma_stop_transfer; |
2929 | } else { |
2930 | chan->start_transfer = xilinx_vdma_start_transfer; |
2931 | chan->stop_transfer = xilinx_dma_stop_transfer; |
2932 | } |
2933 | |
2934 | /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */ |
2935 | if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { |
2936 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA || |
2937 | dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
2938 | XILINX_DMA_DMASR_SG_MASK) |
2939 | chan->has_sg = true; |
2940 | dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, |
2941 | chan->has_sg ? "enabled": "disabled"); |
2942 | } |
2943 | |
2944 | /* Initialize the tasklet */ |
2945 | tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet); |
2946 | |
2947 | /* |
2948 | * Initialize the DMA channel and add it to the DMA engine channels |
2949 | * list. |
2950 | */ |
2951 | chan->common.device = &xdev->common; |
2952 | |
2953 | list_add_tail(&chan->common.device_node, &xdev->common.channels); |
2954 | xdev->chan[chan->id] = chan; |
2955 | |
2956 | /* Reset the channel */ |
2957 | err = xilinx_dma_chan_reset(chan); |
2958 | if (err < 0) { |
2959 | dev_err(xdev->dev, "Reset channel failed\n"); |
2960 | return err; |
2961 | } |
2962 | |
2963 | return 0; |
2964 | } |
2965 | |
2966 | /** |
2967 | * xilinx_dma_child_probe - Per child node probe |
2968 | * It gets the number of dma-channels per child node from |
2969 | * the device tree and initializes all the channels. |
2970 | * |
2971 | * @xdev: Driver specific device structure |
2972 | * @node: Device node |
2973 | * |
2974 | * Return: '0' on success and failure value on error. |
2975 | */ |
2976 | static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, |
2977 | struct device_node *node) |
2978 | { |
2979 | int ret, i; |
2980 | u32 nr_channels = 1; |
2981 | |
2982 | ret = of_property_read_u32(node, "dma-channels", &nr_channels); |
2983 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) |
2984 | dev_warn(xdev->dev, "missing dma-channels property\n"); |
2985 | |
2986 | for (i = 0; i < nr_channels; i++) { |
2987 | ret = xilinx_dma_chan_probe(xdev, node); |
2988 | if (ret) |
2989 | return ret; |
2990 | } |
2991 | |
2992 | return 0; |
2993 | } |
2994 | |
2995 | /** |
2996 | * of_dma_xilinx_xlate - Translation function |
2997 | * @dma_spec: Pointer to DMA specifier as found in the device tree |
2998 | * @ofdma: Pointer to DMA controller data |
2999 | * |
3000 | * Return: DMA channel pointer on success and NULL on error |
3001 | */ |
3002 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, |
3003 | struct of_dma *ofdma) |
3004 | { |
3005 | struct xilinx_dma_device *xdev = ofdma->of_dma_data; |
3006 | int chan_id = dma_spec->args[0]; |
3007 | |
3008 | if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id]) |
3009 | return NULL; |
3010 | |
3011 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); |
3012 | } |
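| |
| /* |
|  * Illustrative sketch (not part of the driver): with the single-cell |
|  * specifier decoded above, a client driver typically requests its |
|  * channel by name ("rx" here is an assumption matching the client |
|  * node's dma-names property): |
|  * |
|  *	chan = dma_request_chan(&pdev->dev, "rx"); |
|  *	if (IS_ERR(chan)) |
|  *		return PTR_ERR(chan); |
|  */ |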
3013 | |
3014 | static const struct xilinx_dma_config axidma_config = { |
3015 | .dmatype = XDMA_TYPE_AXIDMA, |
3016 | .clk_init = axidma_clk_init, |
3017 | .irq_handler = xilinx_dma_irq_handler, |
3018 | .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, |
3019 | }; |
3020 | |
3021 | static const struct xilinx_dma_config aximcdma_config = { |
3022 | .dmatype = XDMA_TYPE_AXIMCDMA, |
3023 | .clk_init = axidma_clk_init, |
3024 | .irq_handler = xilinx_mcdma_irq_handler, |
3025 | .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE, |
3026 | }; |
3027 | static const struct xilinx_dma_config axicdma_config = { |
3028 | .dmatype = XDMA_TYPE_CDMA, |
3029 | .clk_init = axicdma_clk_init, |
3030 | .irq_handler = xilinx_dma_irq_handler, |
3031 | .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE, |
3032 | }; |
3033 | |
3034 | static const struct xilinx_dma_config axivdma_config = { |
3035 | .dmatype = XDMA_TYPE_VDMA, |
3036 | .clk_init = axivdma_clk_init, |
3037 | .irq_handler = xilinx_dma_irq_handler, |
3038 | .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, |
3039 | }; |
3040 | |
3041 | static const struct of_device_id xilinx_dma_of_ids[] = { |
3042 | { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, |
3043 | { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, |
3044 | { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, |
3045 | { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, |
3046 | {} |
3047 | }; |
3048 | MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); |
3049 | |
3050 | /** |
3051 | * xilinx_dma_probe - Driver probe function |
3052 | * @pdev: Pointer to the platform_device structure |
3053 | * |
3054 | * Return: '0' on success and failure value on error |
3055 | */ |
3056 | static int xilinx_dma_probe(struct platform_device *pdev) |
3057 | { |
3058 | int (*clk_init)(struct platform_device *, struct clk **, struct clk **, |
3059 | struct clk **, struct clk **, struct clk **) |
3060 | = axivdma_clk_init; |
3061 | struct device_node *node = pdev->dev.of_node; |
3062 | struct xilinx_dma_device *xdev; |
3063 | struct device_node *child, *np = pdev->dev.of_node; |
3064 | u32 num_frames, addr_width, len_width; |
3065 | int i, err; |
3066 | |
3067 | /* Allocate and initialize the DMA engine structure */ |
3068 | xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); |
3069 | if (!xdev) |
3070 | return -ENOMEM; |
3071 | |
3072 | xdev->dev = &pdev->dev; |
3073 | if (np) { |
3074 | const struct of_device_id *match; |
3075 | |
3076 | match = of_match_node(xilinx_dma_of_ids, np); |
3077 | if (match && match->data) { |
3078 | xdev->dma_config = match->data; |
3079 | clk_init = xdev->dma_config->clk_init; |
3080 | } |
3081 | } |
3082 | |
3083 | err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, |
3084 | &xdev->rx_clk, &xdev->rxs_clk); |
3085 | if (err) |
3086 | return err; |
3087 | |
3088 | /* Request and map I/O memory */ |
3089 | xdev->regs = devm_platform_ioremap_resource(pdev, 0); |
3090 | if (IS_ERR(xdev->regs)) { |
3091 | err = PTR_ERR(xdev->regs); |
3092 | goto disable_clks; |
3093 | } |
3094 | /* Retrieve the DMA engine properties from the device tree */ |
3095 | xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); |
3096 | xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; |
3097 | |
3098 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || |
3099 | xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
3100 | if (!of_property_read_u32(node, "xlnx,sg-length-width", |
3101 | &len_width)) { |
3102 | if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || |
3103 | len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { |
3104 | dev_warn(xdev->dev, |
3105 | "invalid xlnx,sg-length-width property value. Using default width\n"); |
3106 | } else { |
3107 | if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) |
3108 | dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); |
3109 | xdev->max_buffer_len = |
3110 | GENMASK(len_width - 1, 0); |
3111 | } |
3112 | } |
3113 | } |
3114 | |
3115 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
3116 | xdev->has_axistream_connected = |
3117 | of_property_read_bool(node, "xlnx,axistream-connected"); |
3118 | } |
3119 | |
3120 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
3121 | err = of_property_read_u32(node, "xlnx,num-fstores", |
3122 | &num_frames); |
3123 | if (err < 0) { |
3124 | dev_err(xdev->dev, |
3125 | "missing xlnx,num-fstores property\n"); |
3126 | goto disable_clks; |
3127 | } |
3128 | |
3129 | err = of_property_read_u32(node, "xlnx,flush-fsync", |
3130 | &xdev->flush_on_fsync); |
3131 | if (err < 0) |
3132 | dev_warn(xdev->dev, |
3133 | "missing xlnx,flush-fsync property\n"); |
3134 | } |
3135 | |
3136 | err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); |
3137 | if (err < 0) |
3138 | dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); |
3139 | |
3140 | if (addr_width > 32) |
3141 | xdev->ext_addr = true; |
3142 | else |
3143 | xdev->ext_addr = false; |
3144 | |
3145 | /* Set metadata mode */ |
3146 | if (xdev->has_axistream_connected) |
3147 | xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE; |
3148 | |
3149 | /* Set the dma mask bits */ |
3150 | err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); |
3151 | if (err < 0) { |
3152 | dev_err(xdev->dev, "DMA mask error %d\n", err); |
3153 | goto disable_clks; |
3154 | } |
3155 | |
3156 | /* Initialize the DMA engine */ |
3157 | xdev->common.dev = &pdev->dev; |
3158 | |
3159 | INIT_LIST_HEAD(list: &xdev->common.channels); |
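	/* CDMA is memory-to-memory only, so slave capabilities do not apply */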
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_synchronize = xilinx_dma_synchronize;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	xdev->common.device_config = xilinx_dma_device_config;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported only by AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		/* Residue calculation is supported only by AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0) {
			of_node_put(child);
			goto error;
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->dma_config->max_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

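	/*
	 * From here on, clients can request channels through the usual
	 * dmaengine/OF path. Illustrative consumer sketch, assuming a
	 * "dmas"/"dma-names" binding in the client node:
	 *
	 *	chan = dma_request_chan(dev, "rx");
	 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
	 *				       DMA_DEV_TO_MEM,
	 *				       DMA_PREP_INTERRUPT);
	 *	cookie = dmaengine_submit(desc);
	 *	dma_async_issue_pending(chan);
	 */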
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
		dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
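	/* Unwind: remove any channels that probed, then release the clocks */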
	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);
disable_clks:
	xdma_disable_allclks(xdev);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

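	/* Tear down in the reverse order of probe */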
	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove_new = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");