1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_RCUWAIT_H_ |
3 | #define _LINUX_RCUWAIT_H_ |
4 | |
5 | #include <linux/rcupdate.h> |
6 | #include <linux/sched/signal.h> |
7 | |
/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a user is blocked (or is
 * checking whether it needs to block) on a condition; it is reset as
 * soon as the condition is known to have been satisfied and the task
 * has been awoken.
 */
struct rcuwait {
	/* The single blocked task, or NULL when nobody waits; accessed under RCU. */
	struct task_struct __rcu *task;
};
19 | |
/* Static initializer for a struct rcuwait: no task is waiting initially. */
#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }
22 | |
/*
 * rcuwait_init - run-time initialization of an rcuwait.
 *
 * A plain store, with no RCU publish barrier; presumably callers
 * initialize @w before it becomes visible to concurrent readers —
 * verify at each call site.
 */
static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}
27 | |
/*
 * Note: this provides no serialization and, just as with waitqueues,
 * requires care when assessing whether or not the wait is active.
 */
32 | static inline int rcuwait_active(struct rcuwait *w) |
33 | { |
34 | return !!rcu_access_pointer(w->task); |
35 | } |
36 | |
37 | extern int rcuwait_wake_up(struct rcuwait *w); |
38 | |
39 | /* |
40 | * The caller is responsible for locking around rcuwait_wait_event(), |
41 | * and [prepare_to/finish]_rcuwait() such that writes to @task are |
42 | * properly serialized. |
43 | */ |
44 | |
/*
 * prepare_to_rcuwait - publish @current as the waiter on @w.
 *
 * rcu_assign_pointer() provides the release ordering needed for RCU
 * readers (such as rcuwait_wake_up()) to observe a fully initialized
 * task pointer. Caller must hold the serialization described above.
 */
static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}
49 | |
50 | extern void finish_rcuwait(struct rcuwait *w); |
51 | |
/*
 * ___rcuwait_wait_event - core rcuwait wait loop.
 * @w:         the rcuwait to block on
 * @condition: expression to wait for; re-evaluated after every wakeup
 * @state:     task state to sleep in (e.g. TASK_INTERRUPTIBLE)
 * @ret:       initial value of the macro's result (__ret)
 * @cmd:       statement that actually sleeps (a schedule() variant);
 *             may update __ret
 *
 * Publishes @current on @w, then loops: set the task state, re-check
 * @condition, abort with -EINTR if a signal is pending (as filtered by
 * signal_pending_state() for @state), otherwise run @cmd.  On exit,
 * finish_rcuwait() (defined elsewhere) unpublishes the waiter.  The
 * statement expression evaluates to __ret.  Caller must provide the
 * serialization described above prepare_to_rcuwait().
 */
#define ___rcuwait_wait_event(w, condition, state, ret, cmd)		\
({									\
	long __ret = ret;						\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
75 | |
/*
 * rcuwait_wait_event - sleep (via plain schedule()) until @condition
 * is true.  Evaluates to 0, or -EINTR if interrupted by a signal
 * permitted by @state.
 */
#define rcuwait_wait_event(w, condition, state)			\
	___rcuwait_wait_event(w, condition, state, 0, schedule())
78 | |
/*
 * Timeout-aware wait loop: the remaining jiffies are threaded through
 * __ret via schedule_timeout(), and ___wait_cond_timeout() (from
 * linux/wait.h) folds timeout expiry into the condition check.
 */
#define __rcuwait_wait_event_timeout(w, condition, state, timeout)	\
	___rcuwait_wait_event(w, ___wait_cond_timeout(condition),	\
			      state, timeout,				\
			      __ret = schedule_timeout(__ret))
83 | |
/*
 * rcuwait_wait_event_timeout - sleep until @condition is true or
 * @timeout (in jiffies) elapses.
 *
 * Checks the condition first to skip the wait setup entirely when it
 * already holds.  Follows the wait_event_timeout() return convention
 * via ___wait_cond_timeout(): remaining jiffies (or 1) if @condition
 * became true, 0 on timeout with @condition false, or -EINTR if
 * interrupted by a signal permitted by @state.
 */
#define rcuwait_wait_event_timeout(w, condition, state, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __rcuwait_wait_event_timeout(w, condition,	\
						     state, timeout);	\
	__ret;								\
})
92 | |
93 | #endif /* _LINUX_RCUWAIT_H_ */ |
94 | |