// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)
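
/*
 * Illustrative sketch (not part of the driver): a complete configuration
 * word is built by OR-ing these fields together. For example, a normal DMA
 * word for a 4-byte-wide, 8-beat-burst memory-to-device transfer (the
 * destination endpoint 0x4 below is a made-up placeholder) could look like:
 *
 *	u32 cfg = SUN4I_DMA_CFG_LOADING |
 *		  SUN4I_DMA_CFG_DST_DATA_WIDTH(2) |	// 2 encodes 4 bytes
 *		  SUN4I_DMA_CFG_DST_BURST_LENGTH(2) |	// 2 encodes 8 beats
 *		  SUN4I_DMA_CFG_DST_DRQ_TYPE(0x4) |	// hypothetical endpoint
 *		  SUN4I_DMA_CFG_SRC_DATA_WIDTH(2) |
 *		  SUN4I_DMA_CFG_SRC_BURST_LENGTH(2) |
 *		  SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
 *
 * The width/burst encodings (2 == 4 bytes, 2 == 8 beats) follow
 * convert_buswidth() and convert_burst() further down in this file.
 */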

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM	0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT	(0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR	0
#define SUN4I_NDMA_ADDR_MODE_IO		1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM	0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT	(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)	(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG		0x0
#define SUN4I_NDMA_SRC_ADDR_REG		0x4
#define SUN4I_NDMA_DST_ADDR_REG		0x8
#define SUN4I_NDMA_BYTE_COUNT_REG	0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)	(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG		0x0
#define SUN4I_DDMA_SRC_ADDR_REG		0x4
#define SUN4I_DDMA_DST_ADDR_REG		0x8
#define SUN4I_DDMA_BYTE_COUNT_REG	0xC
#define SUN4I_DDMA_PARA_REG		0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21,
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 * (29 * 2 - 1 = 57 normal ones plus 21 dedicated ones).
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS	\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS		\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

/*
 * This set of SUN4I_DDMA timing parameters was found experimentally while
 * working with the SPI driver and seems to make it behave correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS	\
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |	\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |	\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |	\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
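
/*
 * Worked example of the encoding above (just arithmetic, not datasheet
 * documentation): with n == 1 the BLK_SIZE fields encode (1 - 1) == 0 at
 * bits 24 and 8, and with n == 2 the WAIT_CYCLES fields encode (2 - 1) == 1
 * at bits 16 and 0, so the whole parameter word evaluates to 0x00010001.
 */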

/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one size limit, so we have to use the smaller value.
 */
#define SUN4I_NDMA_MAX_SEG_SIZE		SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE		SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE		SUN4I_NDMA_MAX_SEG_SIZE

struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	bool				is_cyclic : 1;
	bool				use_half_int : 1;
};

struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device	slave;
	struct sun4i_dma_pchan	*pchans;
	struct sun4i_dma_vchan	*vchans;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}
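
/*
 * Illustrative client-side sketch (assumptions, not part of this driver):
 * a peripheral driver doing MEM_TO_DEV would typically hand a config like
 * the one below to dmaengine_slave_config(); sanitize_config() then fills
 * in the missing source side from the destination side.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,	// hypothetical FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 4,
 *		// src_addr_width/src_maxburst left at 0: copied from dst
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */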

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}
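
/*
 * Sketch of how a contract flows through the dmaengine API from a client's
 * point of view (illustrative only; "my_sg" and "my_callback" are made-up
 * names). Each prep call builds one contract whose promises the driver
 * then executes serially:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_sg(chan, my_sg, my_sg_len,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = my_callback;
 *	dmaengine_submit(txd);		// contract queued on the vchan
 *	dma_async_issue_pending(chan);	// kicks __execute_vchan_pending()
 */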

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;
	u8 ram_type, io_mode, linear_mode;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
	}

	/*
	 * We will be using half done interrupts to make two periods
	 * out of a promise, so we need to program the DMA engine less
	 * often
	 */

	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to program the engine half as often as if we
	 * didn't use it (keep in mind the hardware doesn't support
	 * linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *	|---|---|---|---|   (periods / promises)
	 *	P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *	|-------|-------|   (promises as configured on hw)
	 *	|---|---|---|---|   (periods)
	 *	P   I  I,P  I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 *
	 * This only works if two periods fit in a single promise. That will
	 * always be the case for dedicated DMA, where the hardware has a much
	 * larger maximum transfer size than advertised to clients.
	 */
	if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
		period_len *= 2;
		contract->use_half_int = 1;
	}
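
	/*
	 * Worked example of the halving above (illustrative numbers only):
	 * an audio client asking for len = 64 KiB with period_len = 16 KiB
	 * gets period_len doubled to 32 KiB, so only two promises are
	 * programmed. Each promise then fires a half-done interrupt at
	 * 16 KiB and an end interrupt at 32 KiB, preserving the four
	 * per-period callbacks the client expects.
	 */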

	nr_periods = DIV_ROUND_UP(len, period_len);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len;
		plength = min((len - offset), period_len);
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, src, dest,
							plength, sconfig);
		else
			promise = generate_ndma_promise(chan, src, dest,
							plength, sconfig, dir);

		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}

static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}
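
/*
 * Device tree usage sketch for the two-cell specifier parsed above (the
 * consumer node and DRQ numbers are illustrative, not taken from a real
 * dts): the first cell selects normal (0) or dedicated (1) DMA, and the
 * second one is the endpoint (DRQ) number.
 *
 *	spi2: spi@1c17000 {
 *		...
 *		dmas = <&dma 1 8>, <&dma 1 9>;	// hypothetical DRQs
 *		dma-names = "rx", "tx";
 *	};
 */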

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * quantity. If possible, replace the first listed element's
	 * full size with the actual remaining amount
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_move_tail(&vchan->processing->list,
				       &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}

	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid looping for too long
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status = sun4i_dma_tx_status;
	priv->slave.device_issue_pending = sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config = sun4i_dma_config;
	priv->slave.device_terminate_all = sun4i_dma_terminate_all;
	priv->slave.copy_align = 2;
	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
				 BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);
}

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove_new = sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");