/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/timekeeping.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 * dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 * can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 * dma_fence_signal, indicates that the fence has completed with an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT only
 * indicates that enable_signaling *might* have been called. In particular,
 * if dma_fence_signal was called right before this bit was set, it could
 * have set DMA_FENCE_FLAG_SIGNALED_BIT before enable_signaling was called.
 * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will have
 * either been completed, or never called at all.
 */
struct dma_fence {
	spinlock_t *lock;
	const struct dma_fence_ops *ops;
	/*
	 * We clear the callback list on kref_put so that by the time we
	 * release the fence it is unused. No one should be adding to the
	 * cb_list that they don't themselves hold a reference for.
	 *
	 * The lifetime of the timestamp is similarly tied to both the
	 * rcu freelist and the cb_list. The timestamp is only set upon
	 * signaling while simultaneously notifying the cb_list. Ergo, we
	 * only use either the cb_list or the timestamp. Upon destruction,
	 * neither are accessible, and so we can use the rcu. This means
	 * that the cb_list is *only* valid until the signal bit is set,
	 * and to read either you *must* hold a reference to the fence,
	 * and not just the rcu_read_lock.
	 *
	 * Listed in chronological order.
	 */
	union {
		struct list_head cb_list;
		/* @cb_list replaced by @timestamp on dma_fence_signal() */
		ktime_t timestamp;
		/* @timestamp replaced by @rcu on dma_fence_release() */
		struct rcu_head rcu;
	};
	u64 context;
	u64 seqno;
	unsigned long flags;
	struct kref refcount;
	int error;
};

enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_TIMESTAMP_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
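
/*
 * A minimal sketch of the race-closing re-check described in the
 * struct dma_fence comment above. enable_hw_signaling() is a
 * hypothetical helper standing in for whatever an implementation does
 * when signaling is first enabled; this is an illustration, not a
 * mirror of the core code:
 *
 *	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 *			      &fence->flags) &&
 *	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 *		enable_hw_signaling(fence);
 */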

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback(), additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};
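
/*
 * A minimal sketch of embedding struct dma_fence_cb to pass extra data
 * to the callback; struct my_waiter and my_fence_cb() are hypothetical
 * names used only for illustration:
 *
 *	struct my_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_waiter *waiter =
 *			container_of(cb, struct my_waiter, cb);
 *
 *		complete(&waiter->done);
 *	}
 */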

/**
 * struct dma_fence_ops - operations implemented for fence
 */
struct dma_fence_ops {
	/**
	 * @use_64bit_seqno:
	 *
	 * True if this dma_fence implementation uses 64bit seqno, false
	 * otherwise.
	 */
	bool use_64bit_seqno;

	/**
	 * @get_driver_name:
	 *
	 * Returns the driver name. This is a callback to allow drivers to
	 * compute the name at runtime, without having to store it permanently
	 * for each fence or build a cache of some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_driver_name)(struct dma_fence *fence);

	/**
	 * @get_timeline_name:
	 *
	 * Return the name of the context this fence belongs to. This is a
	 * callback to allow drivers to compute the name at runtime, without
	 * having to store it permanently for each fence or build a cache of
	 * some sort.
	 *
	 * This callback is mandatory.
	 */
	const char * (*get_timeline_name)(struct dma_fence *fence);

	/**
	 * @enable_signaling:
	 *
	 * Enable software signaling of fence.
	 *
	 * For fence implementations that have the capability for hw->hw
	 * signaling, they can implement this op to enable the necessary
	 * interrupts, or insert commands into cmdstream, etc, to avoid these
	 * costly operations for the common case where only hw->hw
	 * synchronization is required. This is called in the first
	 * dma_fence_wait() or dma_fence_add_callback() path to let the fence
	 * implementation know that there is another driver waiting on the
	 * signal (i.e. the hw->sw case).
	 *
	 * This is called with IRQs disabled, so only spinlocks which disable
	 * IRQs can be used in the code outside of this callback.
	 *
	 * A return value of false indicates the fence already passed,
	 * or some failure occurred that made it impossible to enable
	 * signaling. True indicates successful enabling.
	 *
	 * &dma_fence.error may be set in enable_signaling, but only when false
	 * is returned.
	 *
	 * Since many implementations can call dma_fence_signal() even before
	 * @enable_signaling has been called, there's a race window where the
	 * dma_fence_signal() might result in the final fence reference being
	 * released and its memory freed. To avoid this, implementations of this
	 * callback should grab their own reference using dma_fence_get(), to be
	 * released when the fence is signalled (through e.g. the interrupt
	 * handler).
	 *
	 * This callback is optional. If this callback is not present, then the
	 * driver must always have signaling enabled.
	 */
	bool (*enable_signaling)(struct dma_fence *fence);

	/**
	 * @signaled:
	 *
	 * Peek whether the fence is signaled, as a fastpath optimization for
	 * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
	 * callback does not need to make any guarantees beyond the fact that
	 * once a fence is indicated as signalled, it must always return true
	 * from this callback afterwards. This callback may return false even
	 * if the fence has completed already; in this case information hasn't
	 * propagated through the system yet. See also dma_fence_is_signaled().
	 *
	 * May set &dma_fence.error if returning true.
	 *
	 * This callback is optional.
	 */
	bool (*signaled)(struct dma_fence *fence);

	/**
	 * @wait:
	 *
	 * Custom wait implementation, defaults to dma_fence_default_wait() if
	 * not set.
	 *
	 * Deprecated and should not be used by new implementations. Only used
	 * by existing implementations which need special handling for their
	 * hardware reset procedure.
	 *
	 * Must return -ERESTARTSYS if @intr is true and the wait was
	 * interrupted, the remaining jiffies if the fence has signaled, or 0
	 * if the wait timed out. Custom implementations can also return other
	 * error values, which should be treated as if the fence is signaled.
	 * For example a hardware lockup could be reported like that.
	 */
	signed long (*wait)(struct dma_fence *fence,
			    bool intr, signed long timeout);

	/**
	 * @release:
	 *
	 * Called on destruction of fence to release additional resources.
	 * Can be called from irq context. This callback is optional. If it is
	 * NULL, then dma_fence_free() is instead called as the default
	 * implementation.
	 */
	void (*release)(struct dma_fence *fence);

	/**
	 * @set_deadline:
	 *
	 * Callback to allow a fence waiter to inform the fence signaler of
	 * an upcoming deadline, such as vblank, by which the waiter would
	 * prefer the fence to be signaled. This is intended to give feedback
	 * to the fence signaler to aid in power management decisions, such
	 * as boosting GPU frequency.
	 *
	 * This is called without &dma_fence.lock held, it can be called
	 * multiple times and from any context. Locking is up to the callee
	 * if it has some state to manage. If multiple deadlines are set,
	 * the expectation is to track the soonest one. If the deadline is
	 * before the current time, it should be interpreted as an immediate
	 * deadline.
	 *
	 * This callback is optional.
	 */
	void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
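
/*
 * A minimal dma_fence_ops only needs the two mandatory name callbacks;
 * every other hook falls back to the defaults described above. The
 * names here are hypothetical:
 *
 *	static const char *my_get_driver_name(struct dma_fence *fence)
 *	{
 *		return "my-driver";
 *	}
 *
 *	static const char *my_get_timeline_name(struct dma_fence *fence)
 *	{
 *		return "my-timeline";
 *	}
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_get_driver_name,
 *		.get_timeline_name = my_get_timeline_name,
 *	};
 */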

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		    spinlock_t *lock, u64 context, u64 seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, dma_fence_release);
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/**
 * dma_fence_get_rcu - get a fence from a dma_resv_list with
 * rcu read lock
 * @fence: fence to increase refcount of
 *
 * Returns the fence if a refcount could be obtained, or NULL otherwise.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
	if (kref_get_unless_zero(&fence->refcount))
		return fence;
	else
		return NULL;
}

/**
 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Returns the fence if a refcount could be obtained, or NULL otherwise.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
	do {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		if (!dma_fence_get_rcu(fence))
			continue;

		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
		 * provides a full memory barrier upon success (such as now).
		 * This is paired with the write barrier from assigning
		 * to the __rcu protected fence pointer so that if that
		 * pointer still matches the current fence, we know we
		 * have successfully acquired a reference to it. If it no
		 * longer matches, we are holding a reference to some other
		 * reallocated pointer. This is possible if the allocator
		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
		 * fence remains valid for the RCU grace period, but it
		 * may be reallocated. When using such allocators, we are
		 * responsible for ensuring the reference we get is to
		 * the right fence, as below.
		 */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		dma_fence_put(fence);
	} while (1);
}
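
/*
 * A sketch of a lookup using dma_fence_get_rcu_safe(); obj->excl_fence
 * is an assumed __rcu fence pointer in some hypothetical object:
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->excl_fence);
 *	rcu_read_unlock();
 *
 *	if (fence) {
 *		... use the fence ...
 *		dma_fence_put(fence);
 *	}
 */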

#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
void __dma_fence_might_wait(void);
#else
static inline bool dma_fence_begin_signalling(void)
{
	return true;
}
static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
				   bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
			   struct dma_fence_cb *cb,
			   dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
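
/*
 * A sketch of waiting via a callback, reusing the hypothetical
 * my_waiter/my_fence_cb example from above. dma_fence_add_callback()
 * returns -ENOENT when the fence has already signaled, in which case
 * the callback is never installed and that path must be handled:
 *
 *	struct my_waiter waiter;
 *	int ret;
 *
 *	init_completion(&waiter.done);
 *	ret = dma_fence_add_callback(fence, &waiter.cb, my_fence_cb);
 *	if (!ret)
 *		wait_for_completion(&waiter.done);
 *	else if (ret == -ENOENT)
 *		... already signaled, nothing to wait for ...
 */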

/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 * is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * This function requires &dma_fence.lock to be held.
 *
 * See also dma_fence_is_signaled().
 */
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal_locked(fence);
		return true;
	}

	return false;
}

/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete; this makes it possible to prevent issues from
 * wraparound between time of issue and time of use, by checking the return
 * value of this function before calling hardware-specific wait instructions.
 *
 * See also dma_fence_is_signaled_locked().
 */
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);
		return true;
	}

	return false;
}

/**
 * __dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence's seqno
 * @f2: the second fence's seqno from the same context
 * @ops: dma_fence_ops associated with the seqno
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool __dma_fence_is_later(u64 f1, u64 f2,
					const struct dma_fence_ops *ops)
{
	/* This is for backward compatibility with drivers which can only handle
	 * 32bit sequence numbers. Use a 64bit compare when the driver says to
	 * do so.
	 */
	if (ops->use_64bit_seqno)
		return f1 > f2;

	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
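
/*
 * Worked example of the wraparound-safe 32bit compare above: with
 * f1 == 0x2 and f2 == 0xfffffffe, the subtraction wraps to 0x4, and
 * (int)0x4 > 0, so f1 is correctly considered later even though its
 * raw value is numerically smaller.
 */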

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1,
				      struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return false;

	return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}

/**
 * dma_fence_is_later_or_same - return true if f1 is later or same as f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2 or the same fence. Both
 * fences must be from the same context, since a seqno is not re-used across
 * contexts.
 */
static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
					      struct dma_fence *f2)
{
	return f1 == f2 || dma_fence_is_later(f1, f2);
}

/**
 * dma_fence_later - return the chronologically later fence
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
						struct dma_fence *f2)
{
	if (WARN_ON(f1->context != f2->context))
		return NULL;

	/*
	 * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
	 * have been set if enable_signaling wasn't called, and enabling that
	 * here is overkill.
	 */
	if (dma_fence_is_later(f1, f2))
		return dma_fence_is_signaled(f1) ? NULL : f1;
	else
		return dma_fence_is_signaled(f2) ? NULL : f2;
}

/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled, so dma_fence_get_status_locked() first
 * checks the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence completed with an error.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
	if (dma_fence_is_signaled_locked(fence))
		return fence->error ?: 1;
	else
		return 0;
}

int dma_fence_get_status(struct dma_fence *fence);

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: the dma_fence
 * @error: the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catch erroneous setting of &dma_fence.error.
 *
 * Examples of error codes which drivers should use:
 *
 * * %-ENODATA This operation produced no data, no other operation affected.
 * * %-ECANCELED All operations from the same context have been canceled.
 * * %-ETIME Operation caused a timeout and potentially device reset.
 */
static inline void dma_fence_set_error(struct dma_fence *fence,
				       int error)
{
	WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
	WARN_ON(error >= 0 || error < -MAX_ERRNO);

	fence->error = error;
}
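
/*
 * A sketch of the required ordering: flag the error first, then signal,
 * so that waiters observe a consistent status. Here a device timeout is
 * reported with %-ETIME:
 *
 *	dma_fence_set_error(fence, -ETIME);
 *	dma_fence_signal(fence);
 */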

/**
 * dma_fence_timestamp - helper to get the completion timestamp of a fence
 * @fence: fence to get the timestamp from.
 *
 * After a fence is signaled the timestamp is updated with the signaling time,
 * but setting the timestamp can race with tasks waiting for the signaling.
 * This helper busy-waits for the correct timestamp to appear.
 */
static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
{
	if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
		return ktime_get();

	while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
		cpu_relax();

	return fence->timestamp;
}

signed long dma_fence_wait_timeout(struct dma_fence *,
				   bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
				       uint32_t count,
				       bool intr, signed long timeout,
				       uint32_t *idx);

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
	signed long ret;

	/* Since dma_fence_wait_timeout() cannot time out with
	 * MAX_SCHEDULE_TIMEOUT, the only valid return values are
	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
	 */
	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	return ret < 0 ? ret : 0;
}
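
/*
 * A sketch of a blocking, interruptible wait while holding a reference,
 * as required by the comment above; obj->fence is an assumed fence
 * pointer protected by caller-side locking:
 *
 *	struct dma_fence *fence = dma_fence_get(obj->fence);
 *	signed long ret;
 *
 *	ret = dma_fence_wait(fence, true);
 *	if (ret == -ERESTARTSYS)
 *		... interrupted by a signal, bail out ...
 *	dma_fence_put(fence);
 */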

void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);

struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);

extern const struct dma_fence_ops dma_fence_array_ops;
extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_array_ops;
}

/**
 * dma_fence_is_chain - check if a fence is from the chain subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_chain and false otherwise.
 */
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}

/**
 * dma_fence_is_container - check if a fence is a container for other fences
 * @fence: the fence to test
 *
 * Return true if this fence is a container for other fences, false otherwise.
 * This is important since we can't build up large fence structures, as we
 * would otherwise run into recursion while operating on those fences.
 */
static inline bool dma_fence_is_container(struct dma_fence *fence)
{
	return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}

#endif /* __LINUX_DMA_FENCE_H */