// SPDX-License-Identifier: MIT

#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"

/**
 * DOC: vblank works
 *
 * Many DRM drivers need to program hardware in a time-sensitive manner, often
 * with a deadline of starting and finishing within a certain region of the
 * scanout. Most of the time the safest way to accomplish this is to simply do
 * said time-sensitive programming in the driver's IRQ handler, which allows
 * drivers to avoid being preempted during these critical regions. Or even
 * better, the hardware may handle applying such time-critical programming
 * independently of the CPU.
 *
 * While there's a decent amount of hardware that's designed so that the CPU
 * doesn't need to be concerned with extremely time-sensitive programming,
 * there are a few situations where it can't be helped. Some unforgiving
 * hardware may require that certain time-sensitive programming be handled
 * completely by the CPU, and said programming may even take too long to
 * handle in an IRQ handler. Another such situation would be where the driver
 * needs to perform a task that must complete within a specific scanout
 * period, but might possibly block and thus cannot be handled in an IRQ
 * context. Neither of these situations can be solved perfectly in Linux since
 * we're not a realtime kernel, and thus the scheduler may cause us to miss
 * our deadline if it decides to preempt us. But for some drivers, it's good
 * enough if we can lower our chance of being preempted to an absolute
 * minimum.
 *
 * This is where &drm_vblank_work comes in. &drm_vblank_work provides a simple
 * generic delayed work implementation which delays work execution until a
 * particular vblank has passed, and then executes the work at realtime
 * priority. This provides the best possible chance at performing
 * time-sensitive hardware programming on time, even when the system is under
 * heavy load. &drm_vblank_work also supports rescheduling, so that self
 * re-arming work items can be easily implemented.
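 *
 * A minimal usage sketch (hypothetical driver code, not taken from any
 * in-tree driver; my_crtc, my_vblank_work_func() and
 * do_time_sensitive_programming() are made-up stand-ins): embed a
 * &drm_vblank_work in a driver structure, initialize it against a CRTC with
 * drm_vblank_work_init(), and re-arm it from its own work function so that it
 * runs once per vblank::
 *
 *	struct my_crtc {
 *		struct drm_crtc base;
 *		struct drm_vblank_work work;
 *	};
 *
 *	static void my_vblank_work_func(struct kthread_work *base)
 *	{
 *		struct my_crtc *my_crtc =
 *			container_of(base, struct my_crtc, work.base);
 *
 *		do_time_sensitive_programming(my_crtc);
 *
 *		drm_vblank_work_schedule(&my_crtc->work,
 *					 drm_crtc_vblank_count(&my_crtc->base) + 1,
 *					 true);
 *	}
 *
 *	static void my_crtc_arm_work(struct my_crtc *my_crtc)
 *	{
 *		drm_vblank_work_init(&my_crtc->work, &my_crtc->base,
 *				     my_vblank_work_func);
 *		drm_vblank_work_schedule(&my_crtc->work,
 *					 drm_crtc_vblank_count(&my_crtc->base) + 1,
 *					 true);
 *	}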
 */

void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;
	u64 count = atomic64_read(&vblank->count);
	bool wake = false;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		if (!drm_vblank_passed(count, work->count))
			continue;

		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		kthread_queue_work(vblank->worker, &work->base);
		wake = true;
	}
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
}

/* Cancel any pending vblank work items and drop the respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;

	assert_spin_locked(&vblank->dev->event_lock);

	drm_WARN_ONCE(vblank->dev, !list_empty(&vblank->pending_work),
		      "Cancelling pending vblank works!\n");

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
	}

	wake_up_all(&vblank->work_wait_queue);
}

/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count
 * @nextonmiss: defer until the next vblank if target vblank was missed
 *
 * Schedule @work for execution once the crtc vblank count reaches @count.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %false the work starts to execute immediately.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %true the work is deferred until the next vblank (as if @count has been
 * specified as crtc vblank count + 1).
 *
 * If @work is already scheduled, this function will reschedule said work
 * using the new @count. This can be used for self-rearming work items.
 *
 * Returns:
 * %1 if @work was successfully (re)scheduled, %0 if it was either already
 * scheduled or cancelled, or a negative error code on failure.
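 *
 * A usage sketch (hypothetical driver snippet; crtc, my_work and
 * frames_until_update are made-up stand-ins): schedule @work for a
 * precomputed target frame, executing immediately rather than slipping a
 * frame if that target has already passed::
 *
 *	u64 target = drm_crtc_vblank_count(crtc) + frames_until_update;
 *	int ret = drm_vblank_work_schedule(&my_work, target, false);
 *
 *	if (ret < 0)
 *		drm_err(crtc->dev, "failed to schedule vblank work: %d\n", ret);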
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
			     u64 count, bool nextonmiss)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	u64 cur_vbl;
	unsigned long irqflags;
	bool passed, inmodeset, rescheduling = false, wake = false;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (work->cancelling)
		goto out;

	spin_lock(&dev->vbl_lock);
	inmodeset = vblank->inmodeset;
	spin_unlock(&dev->vbl_lock);
	if (inmodeset)
		goto out;

	if (list_empty(&work->node)) {
		ret = drm_vblank_get(dev, vblank->pipe);
		if (ret < 0)
			goto out;
	} else if (work->count == count) {
		/* Already scheduled w/ same vbl count */
		goto out;
	} else {
		rescheduling = true;
	}

	work->count = count;
	cur_vbl = drm_vblank_count(dev, vblank->pipe);
	passed = drm_vblank_passed(cur_vbl, count);
	if (passed)
		drm_dbg_core(dev,
			     "crtc %d vblank %llu already passed (current %llu)\n",
			     vblank->pipe, count, cur_vbl);

	if (!nextonmiss && passed) {
		drm_vblank_put(dev, vblank->pipe);
		ret = kthread_queue_work(vblank->worker, &work->base);

		if (rescheduling) {
			list_del_init(&work->node);
			wake = true;
		}
	} else {
		if (!rescheduling)
			list_add_tail(&work->node, &vblank->pending_work);
		ret = true;
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);

/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
 * finish executing
 * @work: vblank work to cancel
 *
 * Cancel an already scheduled vblank work and wait for its
 * execution to finish.
 *
 * On return, @work is guaranteed to no longer be scheduled or running, even
 * if it's self-arming.
 *
 * Returns:
 * %True if the work was cancelled before it started to execute, %false
 * otherwise.
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	bool ret = false;

	spin_lock_irq(&dev->event_lock);
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		ret = true;
	}

	work->cancelling++;
	spin_unlock_irq(&dev->event_lock);

	wake_up_all(&vblank->work_wait_queue);

	if (kthread_cancel_work_sync(&work->base))
		ret = true;

	spin_lock_irq(&dev->event_lock);
	work->cancelling--;
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);

/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * executing
 * @work: vblank work to flush
 *
 * Wait until @work has finished executing once.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);

/**
 * drm_vblank_work_init - initialize a vblank work item
 * @work: vblank work item
 * @crtc: CRTC whose vblank will trigger the work execution
 * @func: work function to be executed
 *
 * Initialize a vblank work item for a specific crtc.
 */
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
			  void (*func)(struct kthread_work *work))
{
	kthread_init_work(&work->base, func);
	INIT_LIST_HEAD(&work->node);
	work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
}
EXPORT_SYMBOL(drm_vblank_work_init);

int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
	struct kthread_worker *worker;

	INIT_LIST_HEAD(&vblank->pending_work);
	init_waitqueue_head(&vblank->work_wait_queue);
	worker = kthread_create_worker(0, "card%d-crtc%d",
				       vblank->dev->primary->index,
				       vblank->pipe);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	vblank->worker = worker;

	sched_set_fifo(worker->task);
	return 0;
}