// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
#define PL330_QUIRK_PERIPH_BURST	BIT(1)

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6

#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
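
/*
 * Worked example (illustrative, not part of the driver logic): with a
 * CCR programmed for 4-byte beats (brst_size = 2, so BRST_SIZE(ccr) = 4)
 * and burst length 16 (BRST_LEN(ccr) = 16),
 * BYTE_TO_BURST(1024, ccr) = 1024 / 4 / 16 = 16 bursts, and
 * BURST_TO_BYTE(16, ccr) = 16 * 4 * 16 = 1024 bytes back again.
 */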

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1 byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1 word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(KERN_CONT x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */

#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:11;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct dma_pl330_desc;

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	struct dma_pl330_desc *desc;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Pause was called while descriptor was BUSY. Due to hardware
	 * limitations, only termination is possible for descriptors
	 * that have been paused.
	 */
	PAUSED,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 * available.
	 */
	struct pl330_thread *thread;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	struct dma_slave_config slave_config;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;

	struct reset_control	*rstc;
	struct reset_control	*rstc_ocp;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	},
	{
		.quirk = "arm,pl330-periph-burst",
		.id = PL330_QUIRK_PERIPH_BURST,
	}
};
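
/*
 * Usage sketch (illustrative): these quirks are requested as boolean
 * properties on the DMAC's device-tree node, e.g.
 *
 *	dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		...
 *		arm,pl330-broken-no-flushp;
 *	};
 *
 * The node address above is hypothetical; the property names are the
 * .quirk strings in of_quirks[].
 */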

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction);

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	buf[2] = val;
	buf[3] = val >> 8;
	buf[4] = val >> 16;
	buf[5] = val >> 24;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}
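
/*
 * Encoding example (illustrative): _emit_SEV(0, buf, 5) produces the
 * two-byte instruction { 0x34, 0x28 }: the CMD_DMASEV opcode, followed
 * by the event number in bits [7:3] of the operand byte (5 << 3 == 0x28).
 */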

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	buf[1] = chan & 0x7;
	buf[2] = addr;
	buf[3] = addr >> 8;
	buf[4] = addr >> 16;
	buf[5] = addr >> 24;

	return SZ_DMAGO;
}
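
/*
 * Encoding example (illustrative): a non-secure GO for channel 2 with
 * microcode at 0x40001000 yields { 0xa2, 0x02, 0x00, 0x10, 0x00, 0x40 }:
 * CMD_DMAGO | (ns << 1), the channel number, then the 32-bit start
 * address in little-endian byte order.
 */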

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
		return;
	}

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* Get going */
	writel(0, regs + DBGCMD);
}
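
/*
 * Register layout example (illustrative): to run DMAKILL on channel 3,
 * insn = { 0x01, 0, 0, 0, 0, 0 }, so DBGINST0 is written with
 * (0x01 << 16) | (1 << 0) | (3 << 8) = 0x00010301 -- bit 0 selects a
 * channel (not the manager) as the debug thread, bits [10:8] carry the
 * channel number, and bits [31:16] carry instruction bytes 0-1.
 * DBGINST1 holds instruction bytes 2-5; writing 0 to DBGCMD executes.
 */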

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	u32 inten = readl(regs + INTEN);

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));

	/* clear the event */
	if (inten & (1 << thrd->ev))
		writel(1 << thrd->ev, regs + INTCLR);
	/* Stop generating interrupts for SEV */
	writel(inten & ~(1 << thrd->ev), regs + INTEN);
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
		UNTIL(thrd, PL330_STATE_WFP);

	return true;
}

static bool pl330_start_thread(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_FAULTING:
		_stop(thrd);
		fallthrough;

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		fallthrough;

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc,
				 enum pl330_cond cond)
{
	int off = 0;

	/*
	 * do FLUSHP at beginning to clear any stale dma requests before the
	 * first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
	}

	return off;
}

static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;

	if (pl330->quirks & PL330_QUIRK_PERIPH_BURST)
		cond = BURST;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
			cond);
		break;

	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/*
 * Only unaligned transfers have dregs (leftover bytes that do not fill
 * a whole burst), so transfer the dregs with a reduced-size burst for
 * mem-to-mem, mem-to-dev or dev-to-mem.
 */
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int transfer_length)
{
	int off = 0;
	int dregs_ccr;

	if (transfer_length == 0)
		return off;

	/*
	 * dregs_len = (total bytes - BURST_TO_BYTE(bursts, ccr)) /
	 *             BRST_SIZE(ccr)
	 * the dregs len must be smaller than burst len,
	 * so, for higher efficiency, we can modify CCR
	 * to use a reduced size burst len for the dregs.
	 */
	dregs_ccr = pxs->ccr;
	dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
		(0xf << CC_DSTBRSTLEN_SHFT));
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_SRCBRSTLEN_SHFT);
	dregs_ccr |= (((transfer_length - 1) & 0xf) <<
		CC_DSTBRSTLEN_SHFT);

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1,
					BURST);
		break;

	case DMA_MEM_TO_MEM:
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	if (*bursts == 1)
		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
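
/*
 * Worked example (illustrative): for *bursts = 1000, the middle branch
 * picks lcnt1 = 256, lcnt0 = 1000 / 256 = 3, cyc = 1, so one nested
 * DMALP pair covers 3 * 256 * 1 = 768 bursts and *bursts is updated to
 * 768; the caller in _setup_loops() then loops again for the
 * remaining 232.
 */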

static inline int _setup_loops(struct pl330_dmac *pl330,
			 unsigned dry_run, u8 buf[],
			 const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
		BRST_SIZE(ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}
	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);

	return off;
}
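
/*
 * Worked example (illustrative): x->bytes = 1000 with 4-byte beats of
 * burst length 16 gives bursts = 1000 / 4 / 16 = 15 full bursts
 * covering 960 bytes, leaving num_dregs = (1000 - 960) / 4 = 10 beats
 * for _dregs() to move with a single reduced-length burst.
 */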

static inline int _setup_xfer(struct pl330_dmac *pl330,
			 unsigned dry_run, u8 buf[],
			 const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		 struct pl330_thread *thrd, unsigned index,
		 struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
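
/*
 * Program shape (illustrative): for a small single-xfer mem-to-mem
 * request the generated microcode would look roughly like
 *
 *	DMAMOV CCR, <ccr>
 *	DMAMOV SAR, <src_addr>
 *	DMAMOV DAR, <dst_addr>
 *	DMALP <bursts>
 *	    DMALD ; DMAST	(WFP/LDP/STP for peripheral xfers)
 *	DMALPEND
 *	DMASEV <thrd->ev>
 *	DMAEND
 */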

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
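
/*
 * Worked example (illustrative): src_inc = 1, dst_inc = 0, 4-byte beats
 * (brst_size = 2), burst length 1, CCTRL0 cache control and no swap
 * yields ccr = CC_SRCINC | (2 << CC_SRCBRSTSIZE_SHFT) |
 * (2 << CC_DSTBRSTSIZE_SHFT) = 0x00010005; the (brst_len - 1) fields
 * contribute nothing for a length of 1.
 */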

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	switch (desc->rqtype) {
	case DMA_MEM_TO_DEV:
		break;

	case DMA_DEV_TO_MEM:
		break;

	case DMA_MEM_TO_MEM:
		break;

	default:
		return -ENOTSUPP;
	}

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	xs.ccr = ccr;
	xs.desc = desc;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
				__func__, __LINE__, ret, pl330->mcbufsz / 2);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (!desc)
		return;

	pch = desc->pchan;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static void pl330_dotask(struct tasklet_struct *t)
{
	struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	regs = pl330->base;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pl330->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pl330->ddma.dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened, i.e., which thread notified */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
			__LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */
			pl330_start_thread(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		descdone = list_first_entry(&pl330->req_done,
					    struct dma_pl330_desc, rqd);
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	int ev;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;
	int chans, i;

	if (pl330->state == DYING)
		return NULL;

	chans = pl330->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;
				break;
			}
		}
		thrd = NULL;
	}

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	_free_event(thrd, thrd->ev);
	thrd->free = true;
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
}

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
}
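
/*
 * Buffer layout (illustrative): each channel thread owns mcbufsz bytes
 * of the shared microcode buffer, split evenly between its two request
 * slots. With the default mcbufsz of 512 (MCODE_BUFF_PER_REQ * 2):
 *
 *	mcode_cpu + id * 512		-> req[0] microcode (256 bytes)
 *	mcode_cpu + id * 512 + 256	-> req[1] microcode (256 bytes)
 */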
1885 | |
1886 | static int dmac_alloc_threads(struct pl330_dmac *pl330) |
1887 | { |
1888 | int chans = pl330->pcfg.num_chan; |
1889 | struct pl330_thread *thrd; |
1890 | int i; |
1891 | |
1892 | /* Allocate 1 Manager and 'chans' Channel threads */ |
1893 | pl330->channels = kcalloc(n: 1 + chans, size: sizeof(*thrd), |
1894 | GFP_KERNEL); |
1895 | if (!pl330->channels) |
1896 | return -ENOMEM; |
1897 | |
1898 | /* Init Channel threads */ |
1899 | for (i = 0; i < chans; i++) { |
1900 | thrd = &pl330->channels[i]; |
1901 | thrd->id = i; |
1902 | thrd->dmac = pl330; |
1903 | _reset_thread(thrd); |
1904 | thrd->free = true; |
1905 | } |
1906 | |
1907 | /* MANAGER is indexed at the end */ |
1908 | thrd = &pl330->channels[chans]; |
1909 | thrd->id = chans; |
1910 | thrd->dmac = pl330; |
1911 | thrd->free = false; |
1912 | pl330->manager = thrd; |
1913 | |
1914 | return 0; |
1915 | } |

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
					   chans * pl330->mcbufsz,
					   &pl330->mcode_bus, GFP_KERNEL,
					   DMA_ATTR_PRIVILEGED);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_attrs(pl330->ddma.dev,
			       chans * pl330->mcbufsz,
			       pl330->mcode_cpu, pl330->mcode_bus,
			       DMA_ATTR_PRIVILEGED);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_dmac *pl330)
{
	int i, ret;

	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
		return ret;
	}

	tasklet_setup(&pl330->tasks, pl330_dotask);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_attrs(pl330->ddma.dev,
		       pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
		       pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY || desc->status == PAUSED)
			continue;

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
				__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(struct tasklet_struct *t)
{
	struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req immediately next to the last completed cookie */
	fill_queue(pch);

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		_stop(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
		power_down = true;
		pch->active = false;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		pl330_start_thread(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		struct dmaengine_desc_callback cb;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		dmaengine_desc_get_callback(&desc->txd, &cb);

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
				pch->active = true;
				spin_lock(&pch->thread->dmac->lock);
				pl330_start_thread(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);
				power_down = false;
			}
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&pch->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
	if (power_down) {
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	}
}

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pl330)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
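/*
 * The xlate above expects exactly one DT cell: the peripheral request
 * line / channel index. A hypothetical client node would therefore
 * reference this controller like (node and indices illustrative only):
 *
 *	uart0: serial@12345000 {
 *		dmas = <&pdma0 3>, <&pdma0 4>;
 *		dma-names = "tx", "rx";
 *	};
 */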

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
	if (!pch->thread) {
		spin_unlock_irqrestore(&pl330->lock, flags);
		return -ENOMEM;
	}

	tasklet_setup(&pch->task, pl330_tasklet);

	spin_unlock_irqrestore(&pl330->lock, flags);

	return 1;
}

/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
	default:
		return DMA_NONE;
	}
}
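/*
 * The mapping above, spelled out from the FIFO's viewpoint:
 *
 *	dmaengine direction	dma-mapping direction (for the FIFO)
 *	DMA_MEM_TO_DEV		DMA_FROM_DEVICE  (FIFO is written to)
 *	DMA_DEV_TO_MEM		DMA_TO_DEVICE    (FIFO is read from)
 *	DMA_DEV_TO_DEV		DMA_BIDIRECTIONAL
 */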

static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)
		return true;

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
		return false;

	pch->dir = dma_dir;
	return true;
}

static int fixup_burst_len(int max_burst_len, int quirks)
{
	if (max_burst_len > PL330_MAX_BURST)
		return PL330_MAX_BURST;
	else if (max_burst_len < 1)
		return 1;
	else
		return max_burst_len;
}
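/*
 * Clamping examples for fixup_burst_len() (illustrative values):
 *
 *	fixup_burst_len(32, 0) -> 16	(capped at PL330_MAX_BURST)
 *	fixup_burst_len(0, 0)  -> 1	(a zero/unset maxburst means one beat)
 *	fixup_burst_len(8, 0)  -> 8	(already in range, passed through)
 *
 * Note the quirks argument is currently unused here; the call sites
 * simply pass the DMAC's quirk mask through.
 */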

static int pl330_config_write(struct dma_chan *chan,
			      struct dma_slave_config *slave_config,
			      enum dma_transfer_direction direction)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_unprep_slave_fifo(pch);
	if (direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;
		if (slave_config->dst_addr_width)
			pch->burst_sz = __ffs(slave_config->dst_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
						 pch->dmac->quirks);
	} else if (direction == DMA_DEV_TO_MEM) {
		if (slave_config->src_addr)
			pch->fifo_addr = slave_config->src_addr;
		if (slave_config->src_addr_width)
			pch->burst_sz = __ffs(slave_config->src_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
						 pch->dmac->quirks);
	}

	return 0;
}

static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	memcpy(&pch->slave_config, slave_config, sizeof(*slave_config));

	return 0;
}

static int pl330_terminate_all(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	bool power_down = false;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;
	spin_unlock(&pl330->lock);

	power_down = pch->active;
	pch->active = false;

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->submitted_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_for_each_entry(desc, &pch->work_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
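	/*
	 * Two puts can be needed here: if the channel was active, drop the
	 * reference taken in pl330_issue_pending() (the tasklet would
	 * normally drop it when the work list drains), then drop the
	 * reference taken by pm_runtime_get_sync() above.
	 */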
	if (power_down)
		pm_runtime_put_autosuspend(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}

/*
 * We don't support the DMA_RESUME command because of hardware
 * limitations, so after pausing the channel we cannot restore
 * it to the active state. We have to terminate the channel and
 * set up the DMA transfer again. This pause feature was implemented
 * to allow the residue to be read safely before terminating the channel.
 */
static int pl330_pause(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	struct dma_pl330_desc *desc;
	unsigned long flags;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	_stop(pch->thread);
	spin_unlock(&pl330->lock);

	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == BUSY)
			desc->status = PAUSED;
	}
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	tasklet_kill(&pch->task);

	pm_runtime_get_sync(pch->dmac->ddma.dev);
	spin_lock_irqsave(&pl330->lock, flags);

	pl330_release_channel(pch->thread);
	pch->thread = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pl330->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	pl330_unprep_slave_fifo(pch);
}

static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
					   struct dma_pl330_desc *desc)
{
	struct pl330_thread *thrd = pch->thread;
	struct pl330_dmac *pl330 = pch->dmac;
	void __iomem *regs = thrd->dmac->base;
	u32 val, addr;

	pm_runtime_get_sync(pl330->ddma.dev);
	val = addr = 0;
	if (desc->rqcfg.src_inc) {
		val = readl(regs + SA(thrd->id));
		addr = desc->px.src_addr;
	} else {
		val = readl(regs + DA(thrd->id));
		addr = desc->px.dst_addr;
	}
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
	if (!val)
		return 0;

	return val - addr;
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	enum dma_status ret;
	unsigned long flags;
	struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!txstate)
		return ret;

	if (ret == DMA_COMPLETE)
		goto out;

	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pch->thread->dmac->lock);

	if (pch->thread->req_running != -1)
		running = pch->thread->req[pch->thread->req_running].desc;

	last_enq = pch->thread->req[pch->thread->lstenq].desc;

	/* Check in pending list */
	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE) {
			transferred = desc->bytes_requested;
		} else if (running && desc == running) {
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		} else if (desc->status == BUSY || desc->status == PAUSED) {
			/*
			 * Busy but not running means either just enqueued,
			 * or finished and not yet marked done
			 */
			if (desc == last_enq)
				transferred = 0;
			else
				transferred = desc->bytes_requested;
		} else {
			transferred = 0;
		}
		residual += desc->bytes_requested - transferred;
		if (desc->txd.cookie == cookie) {
			switch (desc->status) {
			case DONE:
				ret = DMA_COMPLETE;
				break;
			case PAUSED:
				ret = DMA_PAUSED;
				break;
			case PREP:
			case BUSY:
				ret = DMA_IN_PROGRESS;
				break;
			default:
				WARN_ON(1);
			}
			break;
		}
		if (desc->last)
			residual = 0;
	}
	spin_unlock(&pch->thread->dmac->lock);
	spin_unlock_irqrestore(&pch->lock, flags);

out:
	dma_set_residue(txstate, residual);

	return ret;
}
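/*
 * Residue accounting sketch for the walk above (numbers illustrative):
 * with three 1 KiB descriptors where the first is DONE, the second is
 * the running request half-way through, and the third is merely
 * enqueued, the residue reported for the third cookie is
 * 0 + 512 + 1024 = 1536 bytes.
 */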

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	if (list_empty(&pch->work_list)) {
		/*
		 * Warn on nothing pending. Empty submitted_list may
		 * break our pm_runtime usage counter as it is
		 * updated on work_list emptiness status.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
		pch->active = true;
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet(&pch->task);
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}
		desc->last = false;

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	last->last = true;
	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct list_head *pool, spinlock_t *lock,
		    gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	desc = kcalloc(count, sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, pool);
	}

	spin_unlock_irqrestore(lock, flags);

	return count;
}

static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
					 spinlock_t *lock)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (!list_empty(pool)) {
		desc = list_entry(pool->next,
				  struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;
	}

	spin_unlock_irqrestore(lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		static DEFINE_SPINLOCK(lock);
		LIST_HEAD(pool);

		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
			return NULL;

		desc = pluck_desc(&pool, &lock);
		WARN_ON(!desc || !list_empty(&pool));
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
			   dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;
	int burst_len;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > PL330_MAX_BURST)
		burst_len = PL330_MAX_BURST;

	return burst_len;
}
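/*
 * Worked example for get_burst_len() (numbers illustrative): with a
 * 64-bit bus (8 bytes), an MFIFO depth of 16 shared across 8 channels
 * and brst_size = 2 (4-byte beats), burst_len = (8 * (16 / 8)) >> 2 = 4,
 * i.e. each channel may use a 4-beat burst without hogging the MFIFO.
 */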

static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			unsigned long iflags;

			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pl330->pool_lock, iflags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						  struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, iflags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			src = dma_addr;
			dst = pch->fifo_dma;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_dma;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->bytes_requested = period_len;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;

	return &desc->txd;
}
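/*
 * For example (values illustrative), a cyclic request with len = 4096
 * and period_len = 1024 is broken into 4096 / 1024 = 4 chained
 * descriptors of one period each; the tasklet recycles them back to
 * PREP state, so the ring keeps running until the channel is terminated.
 */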

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		      dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pl330 = pch->dmac;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	/*
	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
	 * transfers which straddle an entry in the DMA device's MFIFO.
	 */
	while ((src | dst | len) & (burst - 1))
		burst /= 2;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);
	/*
	 * If burst size is smaller than bus width then make sure we only
	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
	 */
	if (burst * 8 < pl330->pcfg.data_bus_width)
		desc->rqcfg.brst_len = 1;

	desc->bytes_requested = len;

	return &desc->txd;
}
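/*
 * Alignment example for the burst-size selection above (illustrative):
 * on a 32-bit bus, burst starts at 4 bytes; for src = 0x1000,
 * dst = 0x2000, len = 6, (src | dst | len) & 3 is non-zero, so burst
 * halves to 2 and brst_size ends up 1 (2-byte beats).
 */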

static void __pl330_giveback_desc(struct pl330_dmac *pl330,
				  struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pl330->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				  struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pl330->desc_pool);
	}

	list_move_tail(&first->node, &pl330->desc_pool);

	spin_unlock_irqrestore(&pl330->pool_lock, flags);
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction direction,
		    unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	pl330_config_write(chan, &pch->slave_config, direction);

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
				sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
				sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->rqtype = direction;
		desc->bytes_requested = sg_dma_len(sg);
	}

	/* Return the last desc in the chain */
	return &desc->txd;
}
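/*
 * A minimal sketch of how a dmaengine client would drive this path,
 * using only generic dmaengine calls (channel name, callback, and
 * addresses are hypothetical; error handling elided):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */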

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

#ifdef CONFIG_DEBUG_FS
static int pl330_debugfs_show(struct seq_file *s, void *data)
{
	struct pl330_dmac *pl330 = s->private;
	int chans, pchs, ch, pr;

	chans = pl330->pcfg.num_chan;
	pchs = pl330->num_peripherals;

	seq_puts(s, "PL330 physical channels:\n");
	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
	seq_puts(s, "--------\t-----\n");
	for (ch = 0; ch < chans; ch++) {
		struct pl330_thread *thrd = &pl330->channels[ch];
		int found = -1;

		for (pr = 0; pr < pchs; pr++) {
			struct dma_pl330_chan *pch = &pl330->peripherals[pr];

			if (!pch->thread || thrd->id != pch->thread->id)
				continue;

			found = pr;
		}

		seq_printf(s, "%d\t\t", thrd->id);
		if (found == -1)
			seq_puts(s, "--\n");
		else
			seq_printf(s, "%d\n", found);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);

static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
	debugfs_create_file(dev_name(pl330->ddma.dev),
			    S_IFREG | 0444, NULL, pl330,
			    &pl330_debugfs_fops);
}
#else
static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
{
}
#endif

/*
 * Runtime PM callbacks are provided by the amba/bus.c driver.
 *
 * It is assumed here that IRQ-safe runtime PM is chosen in probe and the
 * amba bus driver will only disable/enable the clock in runtime PM callbacks.
 */
static int __maybe_unused pl330_suspend(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);

	pm_runtime_force_suspend(dev);
	clk_unprepare(pcdev->pclk);

	return 0;
}

static int __maybe_unused pl330_resume(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	int ret;

	ret = clk_prepare(pcdev->pclk);
	if (ret)
		return ret;

	pm_runtime_force_resume(dev);

	return ret;
}

static const struct dev_pm_ops pl330_pm = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume)
};

static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;
	struct device_node *np = adev->dev.of_node;

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330)
		return -ENOMEM;

	pd = &pl330->ddma;
	pd->dev = &adev->dev;

	pl330->mcbufsz = 0;

	/* get quirk */
	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
	if (IS_ERR(pl330->rstc)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc),
				     "Failed to get reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
			return ret;
		}
	}

	pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
	if (IS_ERR(pl330->rstc_ocp)) {
		return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
				     "Failed to get OCP reset!\n");
	} else {
		ret = reset_control_deassert(pl330->rstc_ocp);
		if (ret) {
			dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
			return ret;
		}
	}

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
		      GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];

		pch->chan.private = adev->dev.of_node;
		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;
		pch->dir = DMA_NONE;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
	if (pcfg->num_peri) {
		dma_cap_set(DMA_SLAVE, pd->cap_mask);
		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_config = pl330_config;
	pd->device_pause = pl330_pause;
	pd->device_terminate_all = pl330_terminate_all;
	pd->device_issue_pending = pl330_issue_pending;
	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	pd->max_burst = PL330_MAX_BURST;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
						 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
				"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	init_pl330_debugfs(pl330);
	dev_info(&adev->dev,
		 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
	dev_info(&adev->dev,
		 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		 pcfg->num_peri, pcfg->num_events);

	pm_runtime_irq_safe(&adev->dev);
	pm_runtime_use_autosuspend(&adev->dev);
	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&adev->dev);
	pm_runtime_put_autosuspend(&adev->dev);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
				 chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}
probe_err2:
	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
	return ret;
}

static void pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	int i, irq;

	pm_runtime_get_noresume(pl330->ddma.dev);

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq)
			devm_free_irq(&adev->dev, irq, pl330);
	}

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
				 chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}

	pl330_del(pl330);

	if (pl330->rstc_ocp)
		reset_control_assert(pl330->rstc_ocp);

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
}

static const struct amba_id pl330_ids[] = {
	{
		.id = 0x00041330,
		.mask = 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");