/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};
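
/*
 * Typical embedding, shown only as an illustrative sketch: a driver wraps
 * these structures in its own types and converts back with container_of().
 * The foo_* names below are hypothetical and not part of this API.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		// hardware-specific state (e.g. list of HW descriptors)
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_desc *cur;	// descriptor currently on the hardware
 *	};
 *
 *	static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(c, struct foo_chan, vc.chan);
 *	}
 */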

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
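
/*
 * Illustrative probe-time setup (hypothetical foo_* names, to_foo_desc()
 * assumed to be the driver's container_of() helper): the driver supplies
 * desc_free so descriptors can be released from the channel tasklet, then
 * registers the channel with vchan_init().
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(to_foo_desc(vd));
 *	}
 *
 *	// in probe(), for each channel:
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fd->ddev);
 */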

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
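
/*
 * Illustrative use in a driver's prep callback (hypothetical foo_* names):
 * allocate the driver descriptor, fill in the hardware-specific parts, and
 * let vchan_tx_prep() wire up the dmaengine plumbing.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *
 *		// set up d's hardware descriptor(s) for the copy here
 *
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */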

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	lockdep_assert_held(&vc->lock);

	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
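
/*
 * Illustrative .device_issue_pending (hypothetical foo_* names; foo_start()
 * is assumed to take the next issued descriptor and program the hardware):
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *			foo_start(fc);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 */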

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	lockdep_assert_held(&vc->lock);

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
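
/*
 * Illustrative completion path in an interrupt handler (hypothetical foo_*
 * names; foo_start_next() is assumed to program the next issued descriptor):
 * mark the finished descriptor complete and kick the next transfer while
 * still holding vc.lock.
 *
 *	spin_lock_irqsave(&fc->vc.lock, flags);
 *	if (fc->cur) {
 *		vchan_cookie_complete(&fc->cur->vd);
 *		fc->cur = NULL;
 *		foo_start_next(fc);
 *	}
 *	spin_unlock_irqrestore(&fc->vc.lock, flags);
 */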

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
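
/*
 * Illustrative use for cyclic transfers (hypothetical foo_* names; the
 * is_cyclic flag is assumed driver state set when the descriptor was
 * prepared): the interrupt handler reports each completed period, and the
 * client's callback then runs from the channel tasklet.
 *
 *	// in the interrupt handler, once per completed period
 *	if (fc->cur && fc->cur->is_cyclic)
 *		vchan_cyclic_callback(&fc->cur->vd);
 */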

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	lockdep_assert_held(&vc->lock);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	lockdep_assert_held(&vc->lock);

	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
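
/*
 * Illustrative sketch of starting the next transfer (hypothetical foo_*
 * names, to_foo_desc() assumed to be the driver's container_of() helper):
 * pick the next issued descriptor, detach it from the issued list and hand
 * it to the hardware, all while holding vc.lock.
 *
 *	struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *	if (vd) {
 *		list_del(&vd->node);
 *		fc->cur = to_foo_desc(vd);
 *		foo_start_hw(fc);
 *	}
 */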

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	lockdep_assert_held(&vc->lock);

	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}
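
/*
 * Illustrative .device_terminate_all (hypothetical foo_* names; foo_stop_hw()
 * is assumed to halt the channel): stop the hardware, hand the in-flight
 * descriptor to vchan_terminate_vdesc(), collect everything else under the
 * lock, then free the whole list outside it.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_stop_hw(fc);
 *		if (fc->cur) {
 *			vchan_terminate_vdesc(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *		return 0;
 *	}
 */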

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
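
/*
 * Illustrative .device_free_chan_resources (hypothetical foo_* names):
 * drivers without extra per-channel state can simply forward to
 * vchan_free_chan_resources().
 *
 *	static void foo_free_chan_resources(struct dma_chan *chan)
 *	{
 *		vchan_free_chan_resources(&to_foo_chan(chan)->vc);
 *	}
 */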

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Also frees any descriptors left on the terminated list (such as a terminated
 * cyclic descriptor) to prevent memory leaks.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
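
/*
 * Illustrative .device_synchronize (hypothetical foo_* names): most drivers
 * can forward straight to vchan_synchronize().
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(&to_foo_chan(chan)->vc);
 *	}
 */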

#endif