// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)
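
/*
 * ->poll_refs layout: bit 31 marks cancellation, bit 30 asks the owner to
 * retry, and the low 30 bits hold the reference count. A zero count means
 * the request is unowned, see io_poll_get_ownership().
 */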

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);
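
/*
 * A wait queue entry's ->private stashes the owning request; the low bit is
 * borrowed to mark the entry of a second (double) poll.
 */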
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Requests must not be modified
 * without owning them, which prevents races when enqueueing task_works and
 * between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}
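
/*
 * Requests are hashed by their CQE user_data so that cancelation can find
 * them; each bucket is guarded by its own spinlock.
 */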
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, ts);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, either because the wakeup was spurious or because a multishot
 * CQE was already served. IOU_POLL_DONE when it's done with the request,
 * with the mask stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES
 * indicates that multishot poll should be removed and that the result is
 * stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
	int v;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		if (unlikely(v != 1)) {
			/* tw should be the owner and so have some refs */
			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
				return IOU_POLL_NO_ACTION;
			if (v & IO_POLL_CANCEL_FLAG)
				return -ECANCELED;
			/*
			 * cqe.res contains only events of the first wake up
			 * and all others are to be lost. Redo vfs_poll() to get
			 * up to date state.
			 */
			if ((v & IO_POLL_REF_MASK) != 1)
				req->cqe.res = 0;

			if (v & IO_POLL_RETRY_FLAG) {
				req->cqe.res = 0;
				/*
				 * We won't find new events that came in between
				 * vfs_poll and the ref put unless we clear the
				 * flag in advance.
				 */
				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
				v &= ~IO_POLL_RETRY_FLAG;
			}
		}

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
			/*
			 * We got woken with a mask, but someone else got to
			 * it first. The above vfs_poll() doesn't add us back
			 * to the waitqueue, so if we get nothing back, we
			 * should be safe and attempt a reissue.
			 */
			if (unlikely(!req->cqe.res)) {
				/* Multishot armed need not reissue */
				if (!(req->apoll_events & EPOLLONESHOT))
					continue;
				return IOU_POLL_REISSUE;
			}
		}
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_fill_cqe_req_aux(req, ts->locked, mask,
						 IORING_CQE_F_MORE)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			int ret = io_poll_issue(req, ts);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/* force the next iteration to vfs_poll() */
		req->cqe.res = 0;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
					IO_POLL_REF_MASK);

	return IOU_POLL_NO_ACTION;
}
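
/*
 * Task-work handler shared by IORING_OP_POLL_ADD and apoll-driven requests:
 * evaluates the poll state, ejects the request from the cancelation hash
 * once done, and either completes the request or resubmits the original op.
 */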
void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret;

	ret = io_poll_check_events(req, ts);
	if (ret == IOU_POLL_NO_ACTION)
		return;
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, ts);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, ts);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, ts);
	} else {
		io_tw_lock(req->ctx, ts);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, ts);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, ts);
		else
			io_req_defer_failed(req, ret);
	}
}
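
/* Stash the triggered mask in cqe.res and punt completion to task_work. */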
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	req->io_task_work.func = io_poll_task_func;

	trace_io_uring_task_add(req, mask);
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}
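
/*
 * Waitqueue callback, which may run under the waitqueue lock, possibly from
 * IRQ context: filter out events the request didn't ask for and, if
 * ownership can be grabbed, queue task_work to process the wakeup.
 */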
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}
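
/*
 * poll_table callback for IORING_OP_POLL_ADD; a second (double) poll entry
 * is stashed in ->async_data.
 */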
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}
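
/*
 * Whether the arming path is allowed to finish the request inline: it must
 * own the request, either via the reference taken upfront for io-wq or by
 * winning ownership here.
 */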
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger fires and the
 * subsequent issue keeps failing. But rather than failing these immediately,
 * allow a certain amount of retries before we give up. Given that this
 * condition should _rarely_ trigger even once, we should be fine with a
 * larger value.
 */
#define APOLL_MAX_RETRY	128
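
/*
 * Pick an async_poll container for the request: reuse the one from a prior
 * arming, grab one from the per-ring cache when the ring lock is held, or
 * fall back to a fresh atomic allocation. Returns NULL once the retry
 * budget is exhausted.
 */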
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}
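
/*
 * Arm internal poll for a request whose file isn't ready yet, so that issue
 * can be retried on a wakeup rather than punted to io-wq. Returns
 * IO_APOLL_OK when armed, IO_APOLL_READY when the file turned out to be
 * ready already, or IO_APOLL_ABORTED when the request can't be poll-driven.
 */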
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}
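
/*
 * Look up a poll request by cancelation data. On success, the bucket
 * spinlock is left held and reported via *out_bucket for the caller to
 * unlock.
 */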
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd)) {
				*out_bucket = hb;
				return req;
			}
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}
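
/*
 * Grab ownership of a found request and detach it from the poll machinery,
 * i.e. its waitqueue entries and the cancelation hash. -EALREADY means
 * somebody else owns the request, e.g. its completion is already in flight.
 */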
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}
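
/*
 * Translate the poll mask from the SQE into internal epoll event bits,
 * folding in EPOLLONESHOT and EPOLLET based on the multishot/level flags.
 */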
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}
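
/*
 * Issue handler for IORING_OP_POLL_ADD: arms the poll and may complete
 * inline if the file is already ready, otherwise completion happens from
 * the wakeup-driven task_work.
 */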
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}
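
/*
 * Issue handler for IORING_OP_POLL_REMOVE: looks up the target poll request
 * by user_data and disarms it, then either cancels it or rearms it with
 * updated events and/or user_data.
 */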
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	struct io_tw_state ts = { .locked = true };

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the low event mask, keep the behaviour flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	io_req_task_complete(preq, &ts);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}