1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_WAIT_H |
3 | #define _LINUX_WAIT_H |
4 | /* |
5 | * Linux wait queue related types and methods |
6 | */ |
7 | #include <linux/list.h> |
8 | #include <linux/stddef.h> |
9 | #include <linux/spinlock.h> |
10 | |
11 | #include <asm/current.h> |
12 | #include <uapi/linux/wait.h> |
13 | |
14 | typedef struct wait_queue_entry wait_queue_entry_t; |
15 | |
16 | typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); |
17 | int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); |
18 | |
19 | /* wait_queue_entry::flags */ |
20 | #define WQ_FLAG_EXCLUSIVE 0x01 |
21 | #define WQ_FLAG_WOKEN 0x02 |
22 | #define WQ_FLAG_BOOKMARK 0x04 |
23 | #define WQ_FLAG_CUSTOM 0x08 |
24 | #define WQ_FLAG_DONE 0x10 |
25 | #define WQ_FLAG_PRIORITY 0x20 |
26 | |
27 | /* |
28 | * A single wait-queue entry structure: |
29 | */ |
30 | struct wait_queue_entry { |
31 | unsigned int flags; |
32 | void *private; |
33 | wait_queue_func_t func; |
34 | struct list_head entry; |
35 | }; |
36 | |
37 | struct wait_queue_head { |
38 | spinlock_t lock; |
39 | struct list_head head; |
40 | }; |
41 | typedef struct wait_queue_head wait_queue_head_t; |
42 | |
43 | struct task_struct; |
44 | |
45 | /* |
 * Macros for declaration and initialisation of the data types
47 | */ |
48 | |
49 | #define __WAITQUEUE_INITIALIZER(name, tsk) { \ |
50 | .private = tsk, \ |
51 | .func = default_wake_function, \ |
52 | .entry = { NULL, NULL } } |
53 | |
54 | #define DECLARE_WAITQUEUE(name, tsk) \ |
55 | struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk) |
56 | |
57 | #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ |
58 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
59 | .head = LIST_HEAD_INIT(name.head) } |
60 | |
61 | #define DECLARE_WAIT_QUEUE_HEAD(name) \ |
62 | struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) |
63 | |
64 | extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *); |
65 | |
66 | #define init_waitqueue_head(wq_head) \ |
67 | do { \ |
68 | static struct lock_class_key __key; \ |
69 | \ |
70 | __init_waitqueue_head((wq_head), #wq_head, &__key); \ |
71 | } while (0) |
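
/*
 * Illustrative sketch, not from the kernel tree ('struct my_device' and
 * its fields are hypothetical): a driver embeds a wait_queue_head in its
 * per-device state and initialises it at runtime, which also gives
 * lockdep a distinct class key per call site:
 *
 *	struct my_device {
 *		struct wait_queue_head	wq;
 *		bool			ready;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		dev->ready = false;
 *	}
 */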
72 | |
73 | #ifdef CONFIG_LOCKDEP |
74 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ |
75 | ({ init_waitqueue_head(&name); name; }) |
76 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ |
77 | struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) |
78 | #else |
79 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) |
80 | #endif |
81 | |
82 | static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p) |
83 | { |
84 | wq_entry->flags = 0; |
85 | wq_entry->private = p; |
86 | wq_entry->func = default_wake_function; |
87 | } |
88 | |
89 | static inline void |
90 | init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func) |
91 | { |
92 | wq_entry->flags = 0; |
93 | wq_entry->private = NULL; |
94 | wq_entry->func = func; |
95 | } |
96 | |
97 | /** |
 * waitqueue_active - locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
102 | * |
103 | * NOTE: this function is lockless and requires care, incorrect usage _will_ |
104 | * lead to sporadic and non-obvious failure. |
105 | * |
106 | * Use either while holding wait_queue_head::lock or when used for wakeups |
107 | * with an extra smp_mb() like:: |
108 | * |
109 | * CPU0 - waker CPU1 - waiter |
110 | * |
111 | * for (;;) { |
112 | * @cond = true; prepare_to_wait(&wq_head, &wait, state); |
113 | * smp_mb(); // smp_mb() from set_current_state() |
114 | * if (waitqueue_active(wq_head)) if (@cond) |
115 | * wake_up(wq_head); break; |
116 | * schedule(); |
117 | * } |
118 | * finish_wait(&wq_head, &wait); |
119 | * |
120 | * Because without the explicit smp_mb() it's possible for the |
121 | * waitqueue_active() load to get hoisted over the @cond store such that we'll |
122 | * observe an empty wait list while the waiter might not observe @cond. |
123 | * |
124 | * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), |
125 | * which (when the lock is uncontended) are of roughly equal cost. |
126 | */ |
127 | static inline int waitqueue_active(struct wait_queue_head *wq_head) |
128 | { |
129 | return !list_empty(&wq_head->head); |
130 | } |
131 | |
132 | /** |
133 | * wq_has_single_sleeper - check if there is only one sleeper |
134 | * @wq_head: wait queue head |
135 | * |
 * Returns true if @wq_head has only one sleeper on the list.
137 | * |
138 | * Please refer to the comment for waitqueue_active. |
139 | */ |
140 | static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) |
141 | { |
142 | return list_is_singular(&wq_head->head); |
143 | } |
144 | |
145 | /** |
146 | * wq_has_sleeper - check if there are any waiting processes |
147 | * @wq_head: wait queue head |
148 | * |
 * Returns true if @wq_head has waiting processes.
150 | * |
151 | * Please refer to the comment for waitqueue_active. |
152 | */ |
153 | static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) |
154 | { |
155 | /* |
156 | * We need to be sure we are in sync with the |
157 | * add_wait_queue modifications to the wait queue. |
158 | * |
159 | * This memory barrier should be paired with one on the |
160 | * waiting side. |
161 | */ |
162 | smp_mb(); |
163 | return waitqueue_active(wq_head); |
164 | } |
165 | |
166 | extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
167 | extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
168 | extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
169 | extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
170 | |
/*
 * Add @wq_entry after any entries flagged WQ_FLAG_PRIORITY, so that
 * priority waiters are always woken first. Must be called with
 * wq_head->lock held.
 */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}
183 | |
184 | /* |
185 | * Used for wake-one threads: |
186 | */ |
187 | static inline void |
188 | __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) |
189 | { |
190 | wq_entry->flags |= WQ_FLAG_EXCLUSIVE; |
191 | __add_wait_queue(wq_head, wq_entry); |
192 | } |
193 | |
194 | static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) |
195 | { |
196 | list_add_tail(&wq_entry->entry, &wq_head->head); |
197 | } |
198 | |
199 | static inline void |
200 | __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) |
201 | { |
202 | wq_entry->flags |= WQ_FLAG_EXCLUSIVE; |
203 | __add_wait_queue_entry_tail(wq_head, wq_entry); |
204 | } |
205 | |
206 | static inline void |
207 | __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) |
208 | { |
209 | list_del(&wq_entry->entry); |
210 | } |
211 | |
212 | void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); |
213 | void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
214 | void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, |
215 | unsigned int mode, void *key, wait_queue_entry_t *bookmark); |
216 | void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
217 | void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); |
218 | void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); |
219 | void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); |
220 | void __wake_up_pollfree(struct wait_queue_head *wq_head); |
221 | |
222 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) |
223 | #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) |
224 | #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) |
225 | #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) |
226 | #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) |
227 | |
228 | #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) |
229 | #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) |
230 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) |
231 | #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) |
232 | |
233 | /* |
234 | * Wakeup macros to be used to report events to the targets. |
235 | */ |
236 | #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) |
237 | #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) |
238 | #define wake_up_poll(x, m) \ |
239 | __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) |
240 | #define wake_up_locked_poll(x, m) \ |
241 | __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) |
242 | #define wake_up_interruptible_poll(x, m) \ |
243 | __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
244 | #define wake_up_interruptible_sync_poll(x, m) \ |
245 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) |
246 | #define wake_up_interruptible_sync_poll_locked(x, m) \ |
247 | __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) |
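
/*
 * Illustrative sketch (hypothetical driver state): a driver supporting
 * poll/select typically reports readiness with a poll key, so that epoll
 * callbacks can filter on the event mask instead of waking unconditionally:
 *
 *	static void my_rx_complete(struct my_device *dev)
 *	{
 *		dev->rx_avail = true;
 *		wake_up_interruptible_poll(&dev->wq, EPOLLIN | EPOLLRDNORM);
 *	}
 */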
248 | |
249 | /** |
250 | * wake_up_pollfree - signal that a polled waitqueue is going away |
251 | * @wq_head: the wait queue head |
252 | * |
253 | * In the very rare cases where a ->poll() implementation uses a waitqueue whose |
254 | * lifetime is tied to a task rather than to the 'struct file' being polled, |
255 | * this function must be called before the waitqueue is freed so that |
256 | * non-blocking polls (e.g. epoll) are notified that the queue is going away. |
257 | * |
258 | * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via |
259 | * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. |
260 | */ |
261 | static inline void wake_up_pollfree(struct wait_queue_head *wq_head) |
262 | { |
263 | /* |
264 | * For performance reasons, we don't always take the queue lock here. |
265 | * Therefore, we might race with someone removing the last entry from |
266 | * the queue, and proceed while they still hold the queue lock. |
267 | * However, rcu_read_lock() is required to be held in such cases, so we |
268 | * can safely proceed with an RCU-delayed free. |
269 | */ |
270 | if (waitqueue_active(wq_head)) |
271 | __wake_up_pollfree(wq_head); |
272 | } |
273 | |
/*
 * Evaluates to true when the wait should stop: either @condition is true
 * or the timeout (tracked in the enclosing __ret) has expired. If the
 * condition becomes true just as the timeout reaches zero, __ret is
 * bumped to 1 so callers still see a "condition met" return value.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
281 | |
282 | #define ___wait_is_interruptible(state) \ |
283 | (!__builtin_constant_p(state) || \ |
284 | state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ |
285 | |
286 | extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags); |
287 | |
288 | /* |
289 | * The below macro ___wait_event() has an explicit shadow of the __ret |
290 | * variable when used from the wait_event_*() macros. |
291 | * |
292 | * This is so that both can use the ___wait_cond_timeout() construct |
293 | * to wrap the condition. |
294 | * |
295 | * The type inconsistency of the wait_event_*() __ret variable is also |
296 | * on purpose; we use long where we can return timeout values and int |
297 | * otherwise. |
298 | */ |
299 | |
300 | #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \ |
301 | ({ \ |
302 | __label__ __out; \ |
303 | struct wait_queue_entry __wq_entry; \ |
304 | long __ret = ret; /* explicit shadow */ \ |
305 | \ |
306 | init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ |
307 | for (;;) { \ |
308 | long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\ |
309 | \ |
310 | if (condition) \ |
311 | break; \ |
312 | \ |
313 | if (___wait_is_interruptible(state) && __int) { \ |
314 | __ret = __int; \ |
315 | goto __out; \ |
316 | } \ |
317 | \ |
318 | cmd; \ |
319 | } \ |
320 | finish_wait(&wq_head, &__wq_entry); \ |
321 | __out: __ret; \ |
322 | }) |
323 | |
324 | #define __wait_event(wq_head, condition) \ |
325 | (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
326 | schedule()) |
327 | |
328 | /** |
329 | * wait_event - sleep until a condition gets true |
330 | * @wq_head: the waitqueue to wait on |
331 | * @condition: a C expression for the event to wait for |
332 | * |
333 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
334 | * @condition evaluates to true. The @condition is checked each time |
335 | * the waitqueue @wq_head is woken up. |
336 | * |
337 | * wake_up() has to be called after changing any variable that could |
338 | * change the result of the wait condition. |
339 | */ |
340 | #define wait_event(wq_head, condition) \ |
341 | do { \ |
342 | might_sleep(); \ |
343 | if (condition) \ |
344 | break; \ |
345 | __wait_event(wq_head, condition); \ |
346 | } while (0) |
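
/*
 * A minimal usage sketch (the 'dev' structure is hypothetical): the waiter
 * blocks until the flag is set; the waker must update the flag before
 * calling wake_up():
 *
 *	// waiter
 *	wait_event(dev->wq, dev->ready);
 *
 *	// waker
 *	dev->ready = true;
 *	wake_up(&dev->wq);
 */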
347 | |
348 | #define __io_wait_event(wq_head, condition) \ |
349 | (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
350 | io_schedule()) |
351 | |
352 | /* |
353 | * io_wait_event() -- like wait_event() but with io_schedule() |
354 | */ |
355 | #define io_wait_event(wq_head, condition) \ |
356 | do { \ |
357 | might_sleep(); \ |
358 | if (condition) \ |
359 | break; \ |
360 | __io_wait_event(wq_head, condition); \ |
361 | } while (0) |
362 | |
363 | #define __wait_event_freezable(wq_head, condition) \ |
364 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
365 | freezable_schedule()) |
366 | |
367 | /** |
368 | * wait_event_freezable - sleep (or freeze) until a condition gets true |
369 | * @wq_head: the waitqueue to wait on |
370 | * @condition: a C expression for the event to wait for |
371 | * |
372 | * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute |
373 | * to system load) until the @condition evaluates to true. The |
374 | * @condition is checked each time the waitqueue @wq_head is woken up. |
375 | * |
376 | * wake_up() has to be called after changing any variable that could |
377 | * change the result of the wait condition. |
378 | */ |
379 | #define wait_event_freezable(wq_head, condition) \ |
380 | ({ \ |
381 | int __ret = 0; \ |
382 | might_sleep(); \ |
383 | if (!(condition)) \ |
384 | __ret = __wait_event_freezable(wq_head, condition); \ |
385 | __ret; \ |
386 | }) |
387 | |
388 | #define __wait_event_timeout(wq_head, condition, timeout) \ |
389 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
390 | TASK_UNINTERRUPTIBLE, 0, timeout, \ |
391 | __ret = schedule_timeout(__ret)) |
392 | |
393 | /** |
394 | * wait_event_timeout - sleep until a condition gets true or a timeout elapses |
395 | * @wq_head: the waitqueue to wait on |
396 | * @condition: a C expression for the event to wait for |
397 | * @timeout: timeout, in jiffies |
398 | * |
399 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
400 | * @condition evaluates to true. The @condition is checked each time |
401 | * the waitqueue @wq_head is woken up. |
402 | * |
403 | * wake_up() has to be called after changing any variable that could |
404 | * change the result of the wait condition. |
405 | * |
406 | * Returns: |
407 | * 0 if the @condition evaluated to %false after the @timeout elapsed, |
408 | * 1 if the @condition evaluated to %true after the @timeout elapsed, |
409 | * or the remaining jiffies (at least 1) if the @condition evaluated |
410 | * to %true before the @timeout elapsed. |
411 | */ |
412 | #define wait_event_timeout(wq_head, condition, timeout) \ |
413 | ({ \ |
414 | long __ret = timeout; \ |
415 | might_sleep(); \ |
416 | if (!___wait_cond_timeout(condition)) \ |
417 | __ret = __wait_event_timeout(wq_head, condition, timeout); \ |
418 | __ret; \ |
419 | }) |
420 | |
421 | #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ |
422 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
423 | TASK_INTERRUPTIBLE, 0, timeout, \ |
424 | __ret = freezable_schedule_timeout(__ret)) |
425 | |
426 | /* |
427 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid |
428 | * increasing load and is freezable. |
429 | */ |
430 | #define wait_event_freezable_timeout(wq_head, condition, timeout) \ |
431 | ({ \ |
432 | long __ret = timeout; \ |
433 | might_sleep(); \ |
434 | if (!___wait_cond_timeout(condition)) \ |
435 | __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \ |
436 | __ret; \ |
437 | }) |
438 | |
439 | #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ |
440 | (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ |
441 | cmd1; schedule(); cmd2) |
442 | /* |
 * Just like wait_event_cmd(), except it sets the exclusive flag.
444 | */ |
445 | #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ |
446 | do { \ |
447 | if (condition) \ |
448 | break; \ |
449 | __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \ |
450 | } while (0) |
451 | |
452 | #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \ |
453 | (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
454 | cmd1; schedule(); cmd2) |
455 | |
456 | /** |
457 | * wait_event_cmd - sleep until a condition gets true |
458 | * @wq_head: the waitqueue to wait on |
459 | * @condition: a C expression for the event to wait for |
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
462 | * |
463 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
464 | * @condition evaluates to true. The @condition is checked each time |
465 | * the waitqueue @wq_head is woken up. |
466 | * |
467 | * wake_up() has to be called after changing any variable that could |
468 | * change the result of the wait condition. |
469 | */ |
470 | #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \ |
471 | do { \ |
472 | if (condition) \ |
473 | break; \ |
474 | __wait_event_cmd(wq_head, condition, cmd1, cmd2); \ |
475 | } while (0) |
476 | |
477 | #define __wait_event_interruptible(wq_head, condition) \ |
478 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
479 | schedule()) |
480 | |
481 | /** |
482 | * wait_event_interruptible - sleep until a condition gets true |
483 | * @wq_head: the waitqueue to wait on |
484 | * @condition: a C expression for the event to wait for |
485 | * |
486 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
487 | * @condition evaluates to true or a signal is received. |
488 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
489 | * |
490 | * wake_up() has to be called after changing any variable that could |
491 | * change the result of the wait condition. |
492 | * |
493 | * The function will return -ERESTARTSYS if it was interrupted by a |
494 | * signal and 0 if @condition evaluated to true. |
495 | */ |
496 | #define wait_event_interruptible(wq_head, condition) \ |
497 | ({ \ |
498 | int __ret = 0; \ |
499 | might_sleep(); \ |
500 | if (!(condition)) \ |
501 | __ret = __wait_event_interruptible(wq_head, condition); \ |
502 | __ret; \ |
503 | }) |
504 | |
505 | #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \ |
506 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
507 | TASK_INTERRUPTIBLE, 0, timeout, \ |
508 | __ret = schedule_timeout(__ret)) |
509 | |
510 | /** |
511 | * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses |
512 | * @wq_head: the waitqueue to wait on |
513 | * @condition: a C expression for the event to wait for |
514 | * @timeout: timeout, in jiffies |
515 | * |
516 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
517 | * @condition evaluates to true or a signal is received. |
518 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
519 | * |
520 | * wake_up() has to be called after changing any variable that could |
521 | * change the result of the wait condition. |
522 | * |
523 | * Returns: |
524 | * 0 if the @condition evaluated to %false after the @timeout elapsed, |
525 | * 1 if the @condition evaluated to %true after the @timeout elapsed, |
526 | * the remaining jiffies (at least 1) if the @condition evaluated |
527 | * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was |
528 | * interrupted by a signal. |
529 | */ |
530 | #define wait_event_interruptible_timeout(wq_head, condition, timeout) \ |
531 | ({ \ |
532 | long __ret = timeout; \ |
533 | might_sleep(); \ |
534 | if (!___wait_cond_timeout(condition)) \ |
535 | __ret = __wait_event_interruptible_timeout(wq_head, \ |
536 | condition, timeout); \ |
537 | __ret; \ |
538 | }) |
539 | |
540 | #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \ |
541 | ({ \ |
542 | int __ret = 0; \ |
543 | struct hrtimer_sleeper __t; \ |
544 | \ |
545 | hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ |
546 | HRTIMER_MODE_REL); \ |
547 | if ((timeout) != KTIME_MAX) { \ |
548 | hrtimer_set_expires_range_ns(&__t.timer, timeout, \ |
549 | current->timer_slack_ns); \ |
550 | hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ |
551 | } \ |
552 | \ |
553 | __ret = ___wait_event(wq_head, condition, state, 0, 0, \ |
554 | if (!__t.task) { \ |
555 | __ret = -ETIME; \ |
556 | break; \ |
557 | } \ |
558 | schedule()); \ |
559 | \ |
560 | hrtimer_cancel(&__t.timer); \ |
561 | destroy_hrtimer_on_stack(&__t.timer); \ |
562 | __ret; \ |
563 | }) |
564 | |
565 | /** |
566 | * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses |
567 | * @wq_head: the waitqueue to wait on |
568 | * @condition: a C expression for the event to wait for |
569 | * @timeout: timeout, as a ktime_t |
570 | * |
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
573 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
574 | * |
575 | * wake_up() has to be called after changing any variable that could |
576 | * change the result of the wait condition. |
577 | * |
578 | * The function returns 0 if @condition became true, or -ETIME if the timeout |
579 | * elapsed. |
580 | */ |
581 | #define wait_event_hrtimeout(wq_head, condition, timeout) \ |
582 | ({ \ |
583 | int __ret = 0; \ |
584 | might_sleep(); \ |
585 | if (!(condition)) \ |
586 | __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \ |
587 | TASK_UNINTERRUPTIBLE); \ |
588 | __ret; \ |
589 | }) |
590 | |
591 | /** |
592 | * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses |
593 | * @wq: the waitqueue to wait on |
594 | * @condition: a C expression for the event to wait for |
595 | * @timeout: timeout, as a ktime_t |
596 | * |
597 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
598 | * @condition evaluates to true or a signal is received. |
599 | * The @condition is checked each time the waitqueue @wq is woken up. |
600 | * |
601 | * wake_up() has to be called after changing any variable that could |
602 | * change the result of the wait condition. |
603 | * |
604 | * The function returns 0 if @condition became true, -ERESTARTSYS if it was |
605 | * interrupted by a signal, or -ETIME if the timeout elapsed. |
606 | */ |
607 | #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ |
608 | ({ \ |
609 | long __ret = 0; \ |
610 | might_sleep(); \ |
611 | if (!(condition)) \ |
612 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ |
613 | TASK_INTERRUPTIBLE); \ |
614 | __ret; \ |
615 | }) |
616 | |
617 | #define __wait_event_interruptible_exclusive(wq, condition) \ |
618 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ |
619 | schedule()) |
620 | |
621 | #define wait_event_interruptible_exclusive(wq, condition) \ |
622 | ({ \ |
623 | int __ret = 0; \ |
624 | might_sleep(); \ |
625 | if (!(condition)) \ |
626 | __ret = __wait_event_interruptible_exclusive(wq, condition); \ |
627 | __ret; \ |
628 | }) |
629 | |
630 | #define __wait_event_killable_exclusive(wq, condition) \ |
631 | ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ |
632 | schedule()) |
633 | |
634 | #define wait_event_killable_exclusive(wq, condition) \ |
635 | ({ \ |
636 | int __ret = 0; \ |
637 | might_sleep(); \ |
638 | if (!(condition)) \ |
639 | __ret = __wait_event_killable_exclusive(wq, condition); \ |
640 | __ret; \ |
641 | }) |
642 | |
643 | |
644 | #define __wait_event_freezable_exclusive(wq, condition) \ |
645 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ |
646 | freezable_schedule()) |
647 | |
648 | #define wait_event_freezable_exclusive(wq, condition) \ |
649 | ({ \ |
650 | int __ret = 0; \ |
651 | might_sleep(); \ |
652 | if (!(condition)) \ |
653 | __ret = __wait_event_freezable_exclusive(wq, condition); \ |
654 | __ret; \ |
655 | }) |
656 | |
657 | /** |
658 | * wait_event_idle - wait for a condition without contributing to system load |
659 | * @wq_head: the waitqueue to wait on |
660 | * @condition: a C expression for the event to wait for |
661 | * |
662 | * The process is put to sleep (TASK_IDLE) until the |
663 | * @condition evaluates to true. |
664 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
665 | * |
666 | * wake_up() has to be called after changing any variable that could |
667 | * change the result of the wait condition. |
668 | * |
669 | */ |
670 | #define wait_event_idle(wq_head, condition) \ |
671 | do { \ |
672 | might_sleep(); \ |
673 | if (!(condition)) \ |
674 | ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ |
675 | } while (0) |
676 | |
677 | /** |
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
679 | * @wq_head: the waitqueue to wait on |
680 | * @condition: a C expression for the event to wait for |
681 | * |
682 | * The process is put to sleep (TASK_IDLE) until the |
683 | * @condition evaluates to true. |
684 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
685 | * |
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up stops
 * after waking this process and further processes are not considered.
689 | * |
690 | * wake_up() has to be called after changing any variable that could |
691 | * change the result of the wait condition. |
692 | * |
693 | */ |
694 | #define wait_event_idle_exclusive(wq_head, condition) \ |
695 | do { \ |
696 | might_sleep(); \ |
697 | if (!(condition)) \ |
698 | ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ |
699 | } while (0) |
700 | |
701 | #define __wait_event_idle_timeout(wq_head, condition, timeout) \ |
702 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
703 | TASK_IDLE, 0, timeout, \ |
704 | __ret = schedule_timeout(__ret)) |
705 | |
706 | /** |
707 | * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses |
708 | * @wq_head: the waitqueue to wait on |
709 | * @condition: a C expression for the event to wait for |
710 | * @timeout: timeout, in jiffies |
711 | * |
712 | * The process is put to sleep (TASK_IDLE) until the |
713 | * @condition evaluates to true. The @condition is checked each time |
714 | * the waitqueue @wq_head is woken up. |
715 | * |
716 | * wake_up() has to be called after changing any variable that could |
717 | * change the result of the wait condition. |
718 | * |
719 | * Returns: |
720 | * 0 if the @condition evaluated to %false after the @timeout elapsed, |
721 | * 1 if the @condition evaluated to %true after the @timeout elapsed, |
722 | * or the remaining jiffies (at least 1) if the @condition evaluated |
723 | * to %true before the @timeout elapsed. |
724 | */ |
725 | #define wait_event_idle_timeout(wq_head, condition, timeout) \ |
726 | ({ \ |
727 | long __ret = timeout; \ |
728 | might_sleep(); \ |
729 | if (!___wait_cond_timeout(condition)) \ |
730 | __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ |
731 | __ret; \ |
732 | }) |
733 | |
734 | #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ |
735 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
736 | TASK_IDLE, 1, timeout, \ |
737 | __ret = schedule_timeout(__ret)) |
738 | |
739 | /** |
740 | * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses |
741 | * @wq_head: the waitqueue to wait on |
742 | * @condition: a C expression for the event to wait for |
743 | * @timeout: timeout, in jiffies |
744 | * |
745 | * The process is put to sleep (TASK_IDLE) until the |
746 | * @condition evaluates to true. The @condition is checked each time |
747 | * the waitqueue @wq_head is woken up. |
748 | * |
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up stops
 * after waking this process and further processes are not considered.
752 | * |
753 | * wake_up() has to be called after changing any variable that could |
754 | * change the result of the wait condition. |
755 | * |
756 | * Returns: |
757 | * 0 if the @condition evaluated to %false after the @timeout elapsed, |
758 | * 1 if the @condition evaluated to %true after the @timeout elapsed, |
759 | * or the remaining jiffies (at least 1) if the @condition evaluated |
760 | * to %true before the @timeout elapsed. |
761 | */ |
762 | #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ |
763 | ({ \ |
764 | long __ret = timeout; \ |
765 | might_sleep(); \ |
766 | if (!___wait_cond_timeout(condition)) \ |
767 | __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ |
768 | __ret; \ |
769 | }) |
770 | |
771 | extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); |
772 | extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); |
773 | |
774 | #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ |
775 | ({ \ |
776 | int __ret; \ |
777 | DEFINE_WAIT(__wait); \ |
778 | if (exclusive) \ |
779 | __wait.flags |= WQ_FLAG_EXCLUSIVE; \ |
780 | do { \ |
781 | __ret = fn(&(wq), &__wait); \ |
782 | if (__ret) \ |
783 | break; \ |
784 | } while (!(condition)); \ |
785 | __remove_wait_queue(&(wq), &__wait); \ |
786 | __set_current_state(TASK_RUNNING); \ |
787 | __ret; \ |
788 | }) |
789 | |
790 | |
791 | /** |
792 | * wait_event_interruptible_locked - sleep until a condition gets true |
793 | * @wq: the waitqueue to wait on |
794 | * @condition: a C expression for the event to wait for |
795 | * |
796 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
797 | * @condition evaluates to true or a signal is received. |
798 | * The @condition is checked each time the waitqueue @wq is woken up. |
799 | * |
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
803 | * |
804 | * The lock is locked/unlocked using spin_lock()/spin_unlock() |
805 | * functions which must match the way they are locked/unlocked outside |
806 | * of this macro. |
807 | * |
808 | * wake_up_locked() has to be called after changing any variable that could |
809 | * change the result of the wait condition. |
810 | * |
811 | * The function will return -ERESTARTSYS if it was interrupted by a |
812 | * signal and 0 if @condition evaluated to true. |
813 | */ |
814 | #define wait_event_interruptible_locked(wq, condition) \ |
815 | ((condition) \ |
816 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) |
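
/*
 * Usage sketch under stated assumptions (hypothetical 'dev', with
 * dev->ready protected by dev->wq.lock): both the condition test and the
 * wakeup run with the waitqueue lock held, so wake_up_locked() is the
 * matching waker:
 *
 *	// waiter
 *	spin_lock(&dev->wq.lock);
 *	err = wait_event_interruptible_locked(dev->wq, dev->ready);
 *	spin_unlock(&dev->wq.lock);
 *
 *	// waker
 *	spin_lock(&dev->wq.lock);
 *	dev->ready = true;
 *	wake_up_locked(&dev->wq);
 *	spin_unlock(&dev->wq.lock);
 */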
817 | |
818 | /** |
819 | * wait_event_interruptible_locked_irq - sleep until a condition gets true |
820 | * @wq: the waitqueue to wait on |
821 | * @condition: a C expression for the event to wait for |
822 | * |
823 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
824 | * @condition evaluates to true or a signal is received. |
825 | * The @condition is checked each time the waitqueue @wq is woken up. |
826 | * |
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
830 | * |
831 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() |
832 | * functions which must match the way they are locked/unlocked outside |
833 | * of this macro. |
834 | * |
835 | * wake_up_locked() has to be called after changing any variable that could |
836 | * change the result of the wait condition. |
837 | * |
838 | * The function will return -ERESTARTSYS if it was interrupted by a |
839 | * signal and 0 if @condition evaluated to true. |
840 | */ |
841 | #define wait_event_interruptible_locked_irq(wq, condition) \ |
842 | ((condition) \ |
843 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) |
844 | |
845 | /** |
846 | * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true |
847 | * @wq: the waitqueue to wait on |
848 | * @condition: a C expression for the event to wait for |
849 | * |
850 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
851 | * @condition evaluates to true or a signal is received. |
852 | * The @condition is checked each time the waitqueue @wq is woken up. |
853 | * |
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
857 | * |
858 | * The lock is locked/unlocked using spin_lock()/spin_unlock() |
859 | * functions which must match the way they are locked/unlocked outside |
860 | * of this macro. |
861 | * |
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up stops
 * after waking this process and further processes are not considered.
865 | * |
866 | * wake_up_locked() has to be called after changing any variable that could |
867 | * change the result of the wait condition. |
868 | * |
869 | * The function will return -ERESTARTSYS if it was interrupted by a |
870 | * signal and 0 if @condition evaluated to true. |
871 | */ |
872 | #define wait_event_interruptible_exclusive_locked(wq, condition) \ |
873 | ((condition) \ |
874 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) |
875 | |
876 | /** |
877 | * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true |
878 | * @wq: the waitqueue to wait on |
879 | * @condition: a C expression for the event to wait for |
880 | * |
881 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
882 | * @condition evaluates to true or a signal is received. |
883 | * The @condition is checked each time the waitqueue @wq is woken up. |
884 | * |
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
888 | * |
889 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() |
890 | * functions which must match the way they are locked/unlocked outside |
891 | * of this macro. |
892 | * |
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up stops
 * after waking this process and further processes are not considered.
896 | * |
897 | * wake_up_locked() has to be called after changing any variable that could |
898 | * change the result of the wait condition. |
899 | * |
900 | * The function will return -ERESTARTSYS if it was interrupted by a |
901 | * signal and 0 if @condition evaluated to true. |
902 | */ |
903 | #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ |
904 | ((condition) \ |
905 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) |
906 | |
907 | |
908 | #define __wait_event_killable(wq, condition) \ |
909 | ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) |
910 | |
911 | /** |
912 | * wait_event_killable - sleep until a condition gets true |
913 | * @wq_head: the waitqueue to wait on |
914 | * @condition: a C expression for the event to wait for |
915 | * |
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
918 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
919 | * |
920 | * wake_up() has to be called after changing any variable that could |
921 | * change the result of the wait condition. |
922 | * |
 * The function will return -ERESTARTSYS if it was interrupted by a kill
 * signal and 0 if @condition evaluated to true.
925 | */ |
926 | #define wait_event_killable(wq_head, condition) \ |
927 | ({ \ |
928 | int __ret = 0; \ |
929 | might_sleep(); \ |
930 | if (!(condition)) \ |
931 | __ret = __wait_event_killable(wq_head, condition); \ |
932 | __ret; \ |
933 | }) |
934 | |
935 | #define __wait_event_killable_timeout(wq_head, condition, timeout) \ |
936 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
937 | TASK_KILLABLE, 0, timeout, \ |
938 | __ret = schedule_timeout(__ret)) |
939 | |
940 | /** |
941 | * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses |
942 | * @wq_head: the waitqueue to wait on |
943 | * @condition: a C expression for the event to wait for |
944 | * @timeout: timeout, in jiffies |
945 | * |
946 | * The process is put to sleep (TASK_KILLABLE) until the |
947 | * @condition evaluates to true or a kill signal is received. |
948 | * The @condition is checked each time the waitqueue @wq_head is woken up. |
949 | * |
950 | * wake_up() has to be called after changing any variable that could |
951 | * change the result of the wait condition. |
952 | * |
953 | * Returns: |
954 | * 0 if the @condition evaluated to %false after the @timeout elapsed, |
955 | * 1 if the @condition evaluated to %true after the @timeout elapsed, |
956 | * the remaining jiffies (at least 1) if the @condition evaluated |
957 | * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was |
958 | * interrupted by a kill signal. |
959 | * |
960 | * Only kill signals interrupt this process. |
961 | */ |
962 | #define wait_event_killable_timeout(wq_head, condition, timeout) \ |
963 | ({ \ |
964 | long __ret = timeout; \ |
965 | might_sleep(); \ |
966 | if (!___wait_cond_timeout(condition)) \ |
967 | __ret = __wait_event_killable_timeout(wq_head, \ |
968 | condition, timeout); \ |
969 | __ret; \ |
970 | }) |
971 | |
972 | |
973 | #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ |
974 | (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
975 | spin_unlock_irq(&lock); \ |
976 | cmd; \ |
977 | schedule(); \ |
978 | spin_lock_irq(&lock)) |
979 | |
980 | /** |
981 | * wait_event_lock_irq_cmd - sleep until a condition gets true. The |
982 | * condition is checked under the lock. This |
983 | * is expected to be called with the lock |
984 | * taken. |
985 | * @wq_head: the waitqueue to wait on |
986 | * @condition: a C expression for the event to wait for |
987 | * @lock: a locked spinlock_t, which will be released before cmd |
988 | * and schedule() and reacquired afterwards. |
989 | * @cmd: a command which is invoked outside the critical section before |
990 | * sleep |
991 | * |
992 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
993 | * @condition evaluates to true. The @condition is checked each time |
994 | * the waitqueue @wq_head is woken up. |
995 | * |
996 | * wake_up() has to be called after changing any variable that could |
997 | * change the result of the wait condition. |
998 | * |
999 | * This is supposed to be called while holding the lock. The lock is |
1000 | * dropped before invoking the cmd and going to sleep and is reacquired |
1001 | * afterwards. |
1002 | */ |
1003 | #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \ |
1004 | do { \ |
1005 | if (condition) \ |
1006 | break; \ |
1007 | __wait_event_lock_irq(wq_head, condition, lock, cmd); \ |
1008 | } while (0) |
1009 | |
1010 | /** |
1011 | * wait_event_lock_irq - sleep until a condition gets true. The |
1012 | * condition is checked under the lock. This |
1013 | * is expected to be called with the lock |
1014 | * taken. |
1015 | * @wq_head: the waitqueue to wait on |
1016 | * @condition: a C expression for the event to wait for |
1017 | * @lock: a locked spinlock_t, which will be released before schedule() |
1018 | * and reacquired afterwards. |
1019 | * |
1020 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
1021 | * @condition evaluates to true. The @condition is checked each time |
1022 | * the waitqueue @wq_head is woken up. |
1023 | * |
1024 | * wake_up() has to be called after changing any variable that could |
1025 | * change the result of the wait condition. |
1026 | * |
1027 | * This is supposed to be called while holding the lock. The lock is |
1028 | * dropped before going to sleep and is reacquired afterwards. |
1029 | */ |
1030 | #define wait_event_lock_irq(wq_head, condition, lock) \ |
1031 | do { \ |
1032 | if (condition) \ |
1033 | break; \ |
1034 | __wait_event_lock_irq(wq_head, condition, lock, ); \ |
1035 | } while (0) |
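
/*
 * Illustrative sketch (hypothetical 'dev' whose dev->lock guards
 * dev->in_flight): the caller enters with the spinlock held; the macro
 * drops it around schedule() and reacquires it before rechecking:
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, dev->in_flight == 0, dev->lock);
 *	// dev->lock is held again here and the condition is true
 *	spin_unlock_irq(&dev->lock);
 */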
1036 | |
1037 | |
1038 | #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \ |
1039 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
1040 | spin_unlock_irq(&lock); \ |
1041 | cmd; \ |
1042 | schedule(); \ |
1043 | spin_lock_irq(&lock)) |
1044 | |
1045 | /** |
1046 | * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. |
1047 | * The condition is checked under the lock. This is expected to |
1048 | * be called with the lock taken. |
1049 | * @wq_head: the waitqueue to wait on |
1050 | * @condition: a C expression for the event to wait for |
1051 | * @lock: a locked spinlock_t, which will be released before cmd and |
1052 | * schedule() and reacquired afterwards. |
1053 | * @cmd: a command which is invoked outside the critical section before |
1054 | * sleep |
1055 | * |
1056 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
1057 | * @condition evaluates to true or a signal is received. The @condition is |
1058 | * checked each time the waitqueue @wq_head is woken up. |
1059 | * |
1060 | * wake_up() has to be called after changing any variable that could |
1061 | * change the result of the wait condition. |
1062 | * |
1063 | * This is supposed to be called while holding the lock. The lock is |
1064 | * dropped before invoking the cmd and going to sleep and is reacquired |
1065 | * afterwards. |
1066 | * |
1067 | * The macro will return -ERESTARTSYS if it was interrupted by a signal |
1068 | * and 0 if @condition evaluated to true. |
1069 | */ |
1070 | #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \ |
1071 | ({ \ |
1072 | int __ret = 0; \ |
1073 | if (!(condition)) \ |
1074 | __ret = __wait_event_interruptible_lock_irq(wq_head, \ |
1075 | condition, lock, cmd); \ |
1076 | __ret; \ |
1077 | }) |
1078 | |
1079 | /** |
1080 | * wait_event_interruptible_lock_irq - sleep until a condition gets true. |
1081 | * The condition is checked under the lock. This is expected |
1082 | * to be called with the lock taken. |
1083 | * @wq_head: the waitqueue to wait on |
1084 | * @condition: a C expression for the event to wait for |
1085 | * @lock: a locked spinlock_t, which will be released before schedule() |
1086 | * and reacquired afterwards. |
1087 | * |
1088 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
 * @condition evaluates to true or a signal is received. The @condition is
1090 | * checked each time the waitqueue @wq_head is woken up. |
1091 | * |
1092 | * wake_up() has to be called after changing any variable that could |
1093 | * change the result of the wait condition. |
1094 | * |
1095 | * This is supposed to be called while holding the lock. The lock is |
1096 | * dropped before going to sleep and is reacquired afterwards. |
1097 | * |
1098 | * The macro will return -ERESTARTSYS if it was interrupted by a signal |
1099 | * and 0 if @condition evaluated to true. |
1100 | */ |
1101 | #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \ |
1102 | ({ \ |
1103 | int __ret = 0; \ |
1104 | if (!(condition)) \ |
1105 | __ret = __wait_event_interruptible_lock_irq(wq_head, \ |
1106 | condition, lock,); \ |
1107 | __ret; \ |
1108 | }) |
1109 | |
1110 | #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \ |
1111 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
1112 | state, 0, timeout, \ |
1113 | spin_unlock_irq(&lock); \ |
1114 | __ret = schedule_timeout(__ret); \ |
		      spin_lock_irq(&lock))
1116 | |
1117 | /** |
1118 | * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets |
1119 | * true or a timeout elapses. The condition is checked under |
1120 | * the lock. This is expected to be called with the lock taken. |
1121 | * @wq_head: the waitqueue to wait on |
1122 | * @condition: a C expression for the event to wait for |
1123 | * @lock: a locked spinlock_t, which will be released before schedule() |
1124 | * and reacquired afterwards. |
1125 | * @timeout: timeout, in jiffies |
1126 | * |
1127 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
 * @condition evaluates to true or a signal is received. The @condition is
1129 | * checked each time the waitqueue @wq_head is woken up. |
1130 | * |
1131 | * wake_up() has to be called after changing any variable that could |
1132 | * change the result of the wait condition. |
1133 | * |
1134 | * This is supposed to be called while holding the lock. The lock is |
1135 | * dropped before going to sleep and is reacquired afterwards. |
1136 | * |
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
1140 | */ |
1141 | #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \ |
1142 | timeout) \ |
1143 | ({ \ |
1144 | long __ret = timeout; \ |
1145 | if (!___wait_cond_timeout(condition)) \ |
1146 | __ret = __wait_event_lock_irq_timeout( \ |
1147 | wq_head, condition, lock, timeout, \ |
1148 | TASK_INTERRUPTIBLE); \ |
1149 | __ret; \ |
1150 | }) |
1151 | |
1152 | #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \ |
1153 | ({ \ |
1154 | long __ret = timeout; \ |
1155 | if (!___wait_cond_timeout(condition)) \ |
1156 | __ret = __wait_event_lock_irq_timeout( \ |
1157 | wq_head, condition, lock, timeout, \ |
1158 | TASK_UNINTERRUPTIBLE); \ |
1159 | __ret; \ |
1160 | }) |
1161 | |
1162 | /* |
1163 | * Waitqueues which are removed from the waitqueue_head at wakeup time |
1164 | */ |
1165 | void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
1166 | bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
1167 | long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); |
1168 | void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); |
1169 | long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); |
1170 | int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); |
1171 | int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); |
1172 | |
1173 | #define DEFINE_WAIT_FUNC(name, function) \ |
1174 | struct wait_queue_entry name = { \ |
1175 | .private = current, \ |
1176 | .func = function, \ |
1177 | .entry = LIST_HEAD_INIT((name).entry), \ |
1178 | } |
1179 | |
1180 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) |
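
/*
 * A minimal open-coded wait loop, as a sketch (hypothetical 'dev'); this
 * is essentially what the wait_event*() macros expand to:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (dev->ready)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&dev->wq, &wait);
 */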
1181 | |
1182 | #define init_wait(wait) \ |
1183 | do { \ |
1184 | (wait)->private = current; \ |
1185 | (wait)->func = autoremove_wake_function; \ |
1186 | INIT_LIST_HEAD(&(wait)->entry); \ |
1187 | (wait)->flags = 0; \ |
1188 | } while (0) |
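
/*
 * A hedged sketch of the wait_woken() pattern (as used by networking
 * code; 'sk_ready' and 'timeout' are hypothetical): woken_wake_function
 * records wakeups in WQ_FLAG_WOKEN, so the loop cannot miss a wakeup
 * that races with the condition check:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!sk_ready) {
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&wq_head, &wait);
 */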
1189 | |
1190 | typedef int (*task_call_f)(struct task_struct *p, void *arg); |
1191 | extern int task_call_func(struct task_struct *p, task_call_f func, void *arg); |
1192 | |
1193 | #endif /* _LINUX_WAIT_H */ |
1194 | |