// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw);

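/*
 * iw->refs serves as both state and reference count: BIT(31) marks the
 * request as canceled, while the low 31 bits count references held by the
 * issue path, the wakeup callback, and cancelation. Ownership of
 * completion belongs to whoever takes the count from zero to non-zero;
 * later reference holders must defer to the owner rather than complete
 * the request themselves.
 */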
#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;
}

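/*
 * Fill in a compat_siginfo for a 32-bit task. The fields are written
 * inside a single user_write_access_begin() section, so user access is
 * opened and closed just once for the whole struct.
 */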
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

	if (io_is_compat(req->ctx))
		return io_waitid_compat_copy_si(iw, signo);

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

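/*
 * Translate the __do_wait() return value into waitid() semantics: a
 * positive return means a child was found, which waitid() reports as 0
 * with si_signo set to SIGCHLD in the user siginfo.
 */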
static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
}

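/*
 * Cancelation is split in two: io_waitid_complete() fills in the result
 * under the ctx lock, then io_req_queue_tw_complete() posts the actual
 * completion via task work.
 */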
static bool __io_waitid_cancel(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);
	io_waitid_complete(req, -ECANCELED);
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	return io_cancel_remove(ctx, cd, issue_flags, &ctx->waitid_list,
				__io_waitid_cancel);
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	return io_cancel_remove_all(ctx, tctx, &ctx->waitid_list, cancel_all,
				    __io_waitid_cancel);
}

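/*
 * Drop the reference the issue (or retry) path holds. Returns false if
 * the count hit zero, meaning the request stays armed and a later wakeup
 * will complete it. Returns true if a wakeup already raced with us; it
 * deferred completion to us, so queue the task work from here.
 */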
static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * Wakeup triggered, racing with us. It was prevented from
	 * completing because of that, queue up the tw to do that.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}

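/*
 * Task work callback, queued from the wakeup callback or from a racing
 * reference drop. Retries the wait and completes the request, unless
 * another retry gets armed.
 */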
static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, tw);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
	io_req_task_complete(req, tw);
}

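/*
 * Wakeup callback, invoked off the parent's wait_chldexit waitqueue with
 * the waitqueue lock held. If we take the first reference, queue task
 * work to process the wait; otherwise the current reference holder will
 * notice the extra reference when dropping its own and complete the
 * request from there.
 */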
static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa;

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iwa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!iwa))
		return -ENOMEM;
	iwa->req = req;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}
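
/*
 * For reference, a minimal userspace sketch matching the field mapping
 * above; it assumes liburing's io_uring_get_sqe() for SQE acquisition and
 * an already-forked child whose pid is in child_pid, but fills the raw
 * SQE fields directly:
 *
 *	siginfo_t si;
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_WAITID;
 *	sqe->len = P_PID;			(iw->which)
 *	sqe->fd = child_pid;			(iw->upid)
 *	sqe->file_index = WEXITED;		(iw->options)
 *	sqe->addr2 = (unsigned long) &si;	(iw->infop, may be 0)
 */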

int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we remain on the list, and b) the lock is
	 * dropped. We only need to worry about racing with the wakeup
	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference, it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * Wakeup triggered, racing with us. It was prevented from
		 * completing because of that, queue up the tw to do that.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}