// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

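/*
 * CCP_DMA_WIDTH() turns a DMA address mask into a width in bits:
 * adding one to an all-ones 64-bit mask wraps to zero, which is
 * reported as 64; any narrower mask is measured with fls64().
 */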
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

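/*
 * Illustrative only: with the driver built as the "ccp" module, every
 * channel could be forced private at load time with something like
 *
 *	modprobe ccp dma_chan_attr=0x1
 *
 * (or ccp.dma_chan_attr=0x1 on the kernel command line for a built-in
 * driver). The helper below maps the parameter onto either the device's
 * vdata default or the DMA_PRIVATE capability.
 */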
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

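/*
 * dmaengine device_free_chan_resources hook: drop every descriptor
 * still sitting on the channel's complete, active, pending and created
 * lists, under the channel lock.
 */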
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

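/*
 * Cleanup tasklet, scheduled from ccp_cmd_callback(): walks the
 * channel's complete list and frees descriptors the client has already
 * acknowledged.
 */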
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

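/*
 * Move the first pending command of a descriptor onto its active list
 * and hand it to the CCP. -EINPROGRESS and -EBUSY mean the command was
 * accepted (possibly backlogged) and are treated as success.
 */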
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

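/*
 * Retire the command that just finished on @desc and decide what runs
 * next: if the descriptor still has pending commands (and no error),
 * return it so the caller can issue the next command; otherwise mark it
 * complete, invoke its callback and dependencies, and move on to the
 * next descriptor on the channel's active list. Returns NULL when the
 * active list has been drained.
 */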
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

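/*
 * Splice the channel's pending descriptors onto the tail of its active
 * list. Returns the first newly activated descriptor when the active
 * list was previously empty (so the caller must kick off processing),
 * or NULL when something is already running.
 */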
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

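/*
 * Completion callback attached to every CCP command built by
 * ccp_create_desc(). It drives the channel's descriptor state machine:
 * retire finished work via ccp_handle_active_desc(), issue the next
 * command unless the channel is paused, and schedule the cleanup
 * tasklet. An -EINPROGRESS notification is ignored; any other error
 * marks the descriptor DMA_ERROR.
 */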
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

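/*
 * dma_async_tx_descriptor tx_submit hook: assign a cookie and move the
 * descriptor from the channel's created list to its pending list. The
 * hardware is not touched until ccp_issue_pending().
 */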
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_move_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

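/*
 * Build a DMA descriptor for a scatter-gather copy: walk the source and
 * destination scatterlists in lockstep and emit one CCP passthrough
 * (no-map) command per min(dst_len, src_len) chunk, each pointing back
 * at the descriptor through ccp_cmd_callback(). The finished descriptor
 * is parked on the channel's created list until tx_submit moves it to
 * pending.
 */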
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

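/*
 * device_prep_dma_memcpy hook: wrap the raw source and destination DMA
 * addresses in single-entry scatterlists so the common
 * ccp_create_desc() path can be reused for a plain memcpy.
 */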
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

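/*
 * device_tx_status hook: report DMA_PAUSED while the channel is paused;
 * otherwise take the generic cookie status and, for completed cookies,
 * refine it with the per-descriptor status (e.g. DMA_ERROR) if the
 * descriptor is still on the complete list.
 */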
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

static void ccp_dma_release(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		tasklet_kill(&chan->cleanup_tasklet);
		list_del_rcu(&dma_chan->device_node);
	}
}

static void ccp_dma_release_channels(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		if (dma_chan->client_count)
			dma_release_channel(dma_chan);
	}
}

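/*
 * Register the CCP as a dmaengine provider (unless disabled via the
 * "dmaengine" module parameter): allocate per-queue channel state and
 * the command/descriptor slab caches, advertise MEMCPY and INTERRUPT
 * capabilities (plus DMA_PRIVATE when requested), wire up the
 * dma_device callbacks and register the device.
 */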
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 *  dma_chan_attr=0x1: all channels private (override vdata)
	 *  dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	ccp_dma_release(ccp);
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	ccp_dma_release_channels(ccp);
	dma_async_device_unregister(dma_dev);
	ccp_dma_release(ccp);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}
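
/*
 * Illustrative client-side sketch (not part of this driver): a kernel
 * consumer would drive one of these channels through the generic
 * dmaengine API along these lines, assuming dst/src are DMA-mapped
 * addresses and len a length obtained elsewhere:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		goto release;
 *
 *	cookie = dmaengine_submit(tx);		// lands in ccp_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks ccp_issue_pending()
 *	...
 * release:
 *	dma_release_channel(chan);
 */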