1 | /* |
---|---|
2 | * An async IO implementation for Linux |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> |
4 | * |
5 | * Implements an efficient asynchronous io interface. |
6 | * |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. |
8 | * Copyright 2018 Christoph Hellwig. |
9 | * |
10 | * See ../COPYING for licensing terms. |
11 | */ |
12 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
13 | |
14 | #include <linux/kernel.h> |
15 | #include <linux/init.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/time.h> |
18 | #include <linux/aio_abi.h> |
19 | #include <linux/export.h> |
20 | #include <linux/syscalls.h> |
21 | #include <linux/backing-dev.h> |
22 | #include <linux/refcount.h> |
23 | #include <linux/uio.h> |
24 | |
25 | #include <linux/sched/signal.h> |
26 | #include <linux/fs.h> |
27 | #include <linux/file.h> |
28 | #include <linux/mm.h> |
29 | #include <linux/mman.h> |
30 | #include <linux/percpu.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/timer.h> |
33 | #include <linux/aio.h> |
34 | #include <linux/highmem.h> |
35 | #include <linux/workqueue.h> |
36 | #include <linux/security.h> |
37 | #include <linux/eventfd.h> |
38 | #include <linux/blkdev.h> |
39 | #include <linux/compat.h> |
40 | #include <linux/migrate.h> |
41 | #include <linux/ramfs.h> |
42 | #include <linux/percpu-refcount.h> |
43 | #include <linux/mount.h> |
44 | #include <linux/pseudo_fs.h> |
45 | |
46 | #include <linux/uaccess.h> |
47 | #include <linux/nospec.h> |
48 | |
49 | #include "internal.h" |
50 | |
51 | #define KIOCB_KEY 0 |
52 | |
53 | #define AIO_RING_MAGIC 0xa10a10a1 |
54 | #define AIO_RING_COMPAT_FEATURES 1 |
55 | #define AIO_RING_INCOMPAT_FEATURES 0 |
56 | struct aio_ring { |
57 | unsigned id; /* kernel internal index number */ |
58 | unsigned nr; /* number of io_events */ |
59 | unsigned head; /* Written to by userland or under ring_lock |
60 | * mutex by aio_read_events_ring(). */ |
61 | unsigned tail; |
62 | |
63 | unsigned magic; |
64 | unsigned compat_features; |
65 | unsigned incompat_features; |
66 | unsigned header_length; /* size of aio_ring */ |
67 | |
68 | |
69 | struct io_event io_events[]; |
70 | }; /* 128 bytes + ring size */ |
71 | |
72 | /* |
73 | * Plugging is meant to work with larger batches of IOs. If we don't |
74 | * have more than the below, then don't bother setting up a plug. |
75 | */ |
76 | #define AIO_PLUG_THRESHOLD 2 |
77 | |
78 | #define AIO_RING_PAGES 8 |
79 | |
80 | struct kioctx_table { |
81 | struct rcu_head rcu; |
82 | unsigned nr; |
83 | struct kioctx __rcu *table[] __counted_by(nr); |
84 | }; |
85 | |
86 | struct kioctx_cpu { |
87 | unsigned reqs_available; |
88 | }; |
89 | |
90 | struct ctx_rq_wait { |
91 | struct completion comp; |
92 | atomic_t count; |
93 | }; |
94 | |
95 | struct kioctx { |
96 | struct percpu_ref users; |
97 | atomic_t dead; |
98 | |
99 | struct percpu_ref reqs; |
100 | |
101 | unsigned long user_id; |
102 | |
103 | struct __percpu kioctx_cpu *cpu; |
104 | |
105 | /* |
106 | * For percpu reqs_available, number of slots we move to/from global |
107 | * counter at a time: |
108 | */ |
109 | unsigned req_batch; |
110 | /* |
111 | * This is what userspace passed to io_setup(), it's not used for |
112 | * anything but counting against the global max_reqs quota. |
113 | * |
114 | * The real limit is nr_events - 1, which will be larger (see |
115 | * aio_setup_ring()) |
116 | */ |
117 | unsigned max_reqs; |
118 | |
119 | /* Size of ringbuffer, in units of struct io_event */ |
120 | unsigned nr_events; |
121 | |
122 | unsigned long mmap_base; |
123 | unsigned long mmap_size; |
124 | |
125 | struct page **ring_pages; |
126 | long nr_pages; |
127 | |
128 | struct rcu_work free_rwork; /* see free_ioctx() */ |
129 | |
130 | /* |
131 | * signals when all in-flight requests are done |
132 | */ |
133 | struct ctx_rq_wait *rq_wait; |
134 | |
135 | struct { |
136 | /* |
137 | * This counts the number of available slots in the ringbuffer, |
138 | * so we avoid overflowing it: it's decremented (if positive) |
139 | * when allocating a kiocb and incremented when the resulting |
140 | * io_event is pulled off the ringbuffer. |
141 | * |
142 | * We batch accesses to it with a percpu version. |
143 | */ |
144 | atomic_t reqs_available; |
145 | } ____cacheline_aligned_in_smp; |
146 | |
147 | struct { |
148 | spinlock_t ctx_lock; |
149 | struct list_head active_reqs; /* used for cancellation */ |
150 | } ____cacheline_aligned_in_smp; |
151 | |
152 | struct { |
153 | struct mutex ring_lock; |
154 | wait_queue_head_t wait; |
155 | } ____cacheline_aligned_in_smp; |
156 | |
157 | struct { |
158 | unsigned tail; |
159 | unsigned completed_events; |
160 | spinlock_t completion_lock; |
161 | } ____cacheline_aligned_in_smp; |
162 | |
163 | struct page *internal_pages[AIO_RING_PAGES]; |
164 | struct file *aio_ring_file; |
165 | |
166 | unsigned id; |
167 | }; |
168 | |
169 | /* |
170 | * First field must be the file pointer in all the |
171 | * iocb unions! See also 'struct kiocb' in <linux/fs.h> |
172 | */ |
173 | struct fsync_iocb { |
174 | struct file *file; |
175 | struct work_struct work; |
176 | bool datasync; |
177 | struct cred *creds; |
178 | }; |
179 | |
180 | struct poll_iocb { |
181 | struct file *file; |
182 | struct wait_queue_head *head; |
183 | __poll_t events; |
184 | bool cancelled; |
185 | bool work_scheduled; |
186 | bool work_need_resched; |
187 | struct wait_queue_entry wait; |
188 | struct work_struct work; |
189 | }; |
190 | |
191 | /* |
192 | * NOTE! Each of the iocb union members has the file pointer |
193 | * as the first entry in their struct definition. So you can |
194 | * access the file pointer through any of the sub-structs, |
195 | * or directly as just 'ki_filp' in this struct. |
196 | */ |
197 | struct aio_kiocb { |
198 | union { |
199 | struct file *ki_filp; |
200 | struct kiocb rw; |
201 | struct fsync_iocb fsync; |
202 | struct poll_iocb poll; |
203 | }; |
204 | |
205 | struct kioctx *ki_ctx; |
206 | kiocb_cancel_fn *ki_cancel; |
207 | |
208 | struct io_event ki_res; |
209 | |
210 | struct list_head ki_list; /* the aio core uses this |
211 | * for cancellation */ |
212 | refcount_t ki_refcnt; |
213 | |
214 | /* |
215 | * If the aio_resfd field of the userspace iocb is not zero, |
216 | * this is the underlying eventfd context to deliver events to. |
217 | */ |
218 | struct eventfd_ctx *ki_eventfd; |
219 | }; |
220 | |
221 | /*------ sysctl variables----*/ |
222 | static DEFINE_SPINLOCK(aio_nr_lock); |
223 | static unsigned long aio_nr; /* current system wide number of aio requests */ |
224 | static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ |
225 | /*----end sysctl variables---*/ |
226 | #ifdef CONFIG_SYSCTL |
227 | static struct ctl_table aio_sysctls[] = { |
228 | { |
229 | .procname = "aio-nr", |
230 | .data = &aio_nr, |
231 | .maxlen = sizeof(aio_nr), |
232 | .mode = 0444, |
233 | .proc_handler = proc_doulongvec_minmax, |
234 | }, |
235 | { |
236 | .procname = "aio-max-nr", |
237 | .data = &aio_max_nr, |
238 | .maxlen = sizeof(aio_max_nr), |
239 | .mode = 0644, |
240 | .proc_handler = proc_doulongvec_minmax, |
241 | }, |
242 | }; |
243 | |
244 | static void __init aio_sysctl_init(void) |
245 | { |
246 | register_sysctl_init("fs", aio_sysctls); |
247 | } |
248 | #else |
249 | #define aio_sysctl_init() do { } while (0) |
250 | #endif |
251 | |
252 | static struct kmem_cache *kiocb_cachep; |
253 | static struct kmem_cache *kioctx_cachep; |
254 | |
255 | static struct vfsmount *aio_mnt; |
256 | |
257 | static const struct file_operations aio_ring_fops; |
258 | static const struct address_space_operations aio_ctx_aops; |
259 | |
260 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) |
261 | { |
262 | struct file *file; |
263 | struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); |
264 | if (IS_ERR(ptr: inode)) |
265 | return ERR_CAST(ptr: inode); |
266 | |
267 | inode->i_mapping->a_ops = &aio_ctx_aops; |
268 | inode->i_mapping->i_private_data = ctx; |
269 | inode->i_size = PAGE_SIZE * nr_pages; |
270 | |
271 | file = alloc_file_pseudo(inode, aio_mnt, "[aio]", |
272 | O_RDWR, &aio_ring_fops); |
273 | if (IS_ERR(ptr: file)) |
274 | iput(inode); |
275 | return file; |
276 | } |
277 | |
278 | static int aio_init_fs_context(struct fs_context *fc) |
279 | { |
280 | if (!init_pseudo(fc, AIO_RING_MAGIC)) |
281 | return -ENOMEM; |
282 | fc->s_iflags |= SB_I_NOEXEC; |
283 | return 0; |
284 | } |
285 | |
286 | /* aio_setup |
287 | * Creates the slab caches used by the aio routines, panic on |
288 | * failure as this is done early during the boot sequence. |
289 | */ |
290 | static int __init aio_setup(void) |
291 | { |
292 | static struct file_system_type aio_fs = { |
293 | .name = "aio", |
294 | .init_fs_context = aio_init_fs_context, |
295 | .kill_sb = kill_anon_super, |
296 | }; |
297 | aio_mnt = kern_mount(&aio_fs); |
298 | if (IS_ERR(ptr: aio_mnt)) |
299 | panic(fmt: "Failed to create aio fs mount."); |
300 | |
301 | kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
302 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
303 | aio_sysctl_init(); |
304 | return 0; |
305 | } |
306 | __initcall(aio_setup); |
307 | |
308 | static void put_aio_ring_file(struct kioctx *ctx) |
309 | { |
310 | struct file *aio_ring_file = ctx->aio_ring_file; |
311 | struct address_space *i_mapping; |
312 | |
313 | if (aio_ring_file) { |
314 | truncate_setsize(inode: file_inode(f: aio_ring_file), newsize: 0); |
315 | |
316 | /* Prevent further access to the kioctx from migratepages */ |
317 | i_mapping = aio_ring_file->f_mapping; |
318 | spin_lock(lock: &i_mapping->i_private_lock); |
319 | i_mapping->i_private_data = NULL; |
320 | ctx->aio_ring_file = NULL; |
321 | spin_unlock(lock: &i_mapping->i_private_lock); |
322 | |
323 | fput(aio_ring_file); |
324 | } |
325 | } |
326 | |
327 | static void aio_free_ring(struct kioctx *ctx) |
328 | { |
329 | int i; |
330 | |
331 | /* Disconnect the kiotx from the ring file. This prevents future |
332 | * accesses to the kioctx from page migration. |
333 | */ |
334 | put_aio_ring_file(ctx); |
335 | |
336 | for (i = 0; i < ctx->nr_pages; i++) { |
337 | struct page *page; |
338 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
339 | page_count(ctx->ring_pages[i])); |
340 | page = ctx->ring_pages[i]; |
341 | if (!page) |
342 | continue; |
343 | ctx->ring_pages[i] = NULL; |
344 | put_page(page); |
345 | } |
346 | |
347 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { |
348 | kfree(objp: ctx->ring_pages); |
349 | ctx->ring_pages = NULL; |
350 | } |
351 | } |
352 | |
353 | static int aio_ring_mremap(struct vm_area_struct *vma) |
354 | { |
355 | struct file *file = vma->vm_file; |
356 | struct mm_struct *mm = vma->vm_mm; |
357 | struct kioctx_table *table; |
358 | int i, res = -EINVAL; |
359 | |
360 | spin_lock(lock: &mm->ioctx_lock); |
361 | rcu_read_lock(); |
362 | table = rcu_dereference(mm->ioctx_table); |
363 | if (!table) |
364 | goto out_unlock; |
365 | |
366 | for (i = 0; i < table->nr; i++) { |
367 | struct kioctx *ctx; |
368 | |
369 | ctx = rcu_dereference(table->table[i]); |
370 | if (ctx && ctx->aio_ring_file == file) { |
371 | if (!atomic_read(v: &ctx->dead)) { |
372 | ctx->user_id = ctx->mmap_base = vma->vm_start; |
373 | res = 0; |
374 | } |
375 | break; |
376 | } |
377 | } |
378 | |
379 | out_unlock: |
380 | rcu_read_unlock(); |
381 | spin_unlock(lock: &mm->ioctx_lock); |
382 | return res; |
383 | } |
384 | |
385 | static const struct vm_operations_struct aio_ring_vm_ops = { |
386 | .mremap = aio_ring_mremap, |
387 | #if IS_ENABLED(CONFIG_MMU) |
388 | .fault = filemap_fault, |
389 | .map_pages = filemap_map_pages, |
390 | .page_mkwrite = filemap_page_mkwrite, |
391 | #endif |
392 | }; |
393 | |
394 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) |
395 | { |
396 | vm_flags_set(vma, VM_DONTEXPAND); |
397 | vma->vm_ops = &aio_ring_vm_ops; |
398 | return 0; |
399 | } |
400 | |
401 | static const struct file_operations aio_ring_fops = { |
402 | .mmap = aio_ring_mmap, |
403 | }; |
404 | |
405 | #if IS_ENABLED(CONFIG_MIGRATION) |
406 | static int aio_migrate_folio(struct address_space *mapping, struct folio *dst, |
407 | struct folio *src, enum migrate_mode mode) |
408 | { |
409 | struct kioctx *ctx; |
410 | unsigned long flags; |
411 | pgoff_t idx; |
412 | int rc; |
413 | |
414 | /* |
415 | * We cannot support the _NO_COPY case here, because copy needs to |
416 | * happen under the ctx->completion_lock. That does not work with the |
417 | * migration workflow of MIGRATE_SYNC_NO_COPY. |
418 | */ |
419 | if (mode == MIGRATE_SYNC_NO_COPY) |
420 | return -EINVAL; |
421 | |
422 | rc = 0; |
423 | |
424 | /* mapping->i_private_lock here protects against the kioctx teardown. */ |
425 | spin_lock(lock: &mapping->i_private_lock); |
426 | ctx = mapping->i_private_data; |
427 | if (!ctx) { |
428 | rc = -EINVAL; |
429 | goto out; |
430 | } |
431 | |
432 | /* The ring_lock mutex. The prevents aio_read_events() from writing |
433 | * to the ring's head, and prevents page migration from mucking in |
434 | * a partially initialized kiotx. |
435 | */ |
436 | if (!mutex_trylock(lock: &ctx->ring_lock)) { |
437 | rc = -EAGAIN; |
438 | goto out; |
439 | } |
440 | |
441 | idx = src->index; |
442 | if (idx < (pgoff_t)ctx->nr_pages) { |
443 | /* Make sure the old folio hasn't already been changed */ |
444 | if (ctx->ring_pages[idx] != &src->page) |
445 | rc = -EAGAIN; |
446 | } else |
447 | rc = -EINVAL; |
448 | |
449 | if (rc != 0) |
450 | goto out_unlock; |
451 | |
452 | /* Writeback must be complete */ |
453 | BUG_ON(folio_test_writeback(src)); |
454 | folio_get(folio: dst); |
455 | |
456 | rc = folio_migrate_mapping(mapping, newfolio: dst, folio: src, extra_count: 1); |
457 | if (rc != MIGRATEPAGE_SUCCESS) { |
458 | folio_put(folio: dst); |
459 | goto out_unlock; |
460 | } |
461 | |
462 | /* Take completion_lock to prevent other writes to the ring buffer |
463 | * while the old folio is copied to the new. This prevents new |
464 | * events from being lost. |
465 | */ |
466 | spin_lock_irqsave(&ctx->completion_lock, flags); |
467 | folio_migrate_copy(newfolio: dst, folio: src); |
468 | BUG_ON(ctx->ring_pages[idx] != &src->page); |
469 | ctx->ring_pages[idx] = &dst->page; |
470 | spin_unlock_irqrestore(lock: &ctx->completion_lock, flags); |
471 | |
472 | /* The old folio is no longer accessible. */ |
473 | folio_put(folio: src); |
474 | |
475 | out_unlock: |
476 | mutex_unlock(lock: &ctx->ring_lock); |
477 | out: |
478 | spin_unlock(lock: &mapping->i_private_lock); |
479 | return rc; |
480 | } |
481 | #else |
482 | #define aio_migrate_folio NULL |
483 | #endif |
484 | |
485 | static const struct address_space_operations aio_ctx_aops = { |
486 | .dirty_folio = noop_dirty_folio, |
487 | .migrate_folio = aio_migrate_folio, |
488 | }; |
489 | |
490 | static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) |
491 | { |
492 | struct aio_ring *ring; |
493 | struct mm_struct *mm = current->mm; |
494 | unsigned long size, unused; |
495 | int nr_pages; |
496 | int i; |
497 | struct file *file; |
498 | |
499 | /* Compensate for the ring buffer's head/tail overlap entry */ |
500 | nr_events += 2; /* 1 is required, 2 for good luck */ |
501 | |
502 | size = sizeof(struct aio_ring); |
503 | size += sizeof(struct io_event) * nr_events; |
504 | |
505 | nr_pages = PFN_UP(size); |
506 | if (nr_pages < 0) |
507 | return -EINVAL; |
508 | |
509 | file = aio_private_file(ctx, nr_pages); |
510 | if (IS_ERR(ptr: file)) { |
511 | ctx->aio_ring_file = NULL; |
512 | return -ENOMEM; |
513 | } |
514 | |
515 | ctx->aio_ring_file = file; |
516 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) |
517 | / sizeof(struct io_event); |
518 | |
519 | ctx->ring_pages = ctx->internal_pages; |
520 | if (nr_pages > AIO_RING_PAGES) { |
521 | ctx->ring_pages = kcalloc(n: nr_pages, size: sizeof(struct page *), |
522 | GFP_KERNEL); |
523 | if (!ctx->ring_pages) { |
524 | put_aio_ring_file(ctx); |
525 | return -ENOMEM; |
526 | } |
527 | } |
528 | |
529 | for (i = 0; i < nr_pages; i++) { |
530 | struct page *page; |
531 | page = find_or_create_page(mapping: file->f_mapping, |
532 | index: i, GFP_USER | __GFP_ZERO); |
533 | if (!page) |
534 | break; |
535 | pr_debug("pid(%d) page[%d]->count=%d\n", |
536 | current->pid, i, page_count(page)); |
537 | SetPageUptodate(page); |
538 | unlock_page(page); |
539 | |
540 | ctx->ring_pages[i] = page; |
541 | } |
542 | ctx->nr_pages = i; |
543 | |
544 | if (unlikely(i != nr_pages)) { |
545 | aio_free_ring(ctx); |
546 | return -ENOMEM; |
547 | } |
548 | |
549 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
550 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); |
551 | |
552 | if (mmap_write_lock_killable(mm)) { |
553 | ctx->mmap_size = 0; |
554 | aio_free_ring(ctx); |
555 | return -EINTR; |
556 | } |
557 | |
558 | ctx->mmap_base = do_mmap(file: ctx->aio_ring_file, addr: 0, len: ctx->mmap_size, |
559 | PROT_READ | PROT_WRITE, |
560 | MAP_SHARED, vm_flags: 0, pgoff: 0, populate: &unused, NULL); |
561 | mmap_write_unlock(mm); |
562 | if (IS_ERR(ptr: (void *)ctx->mmap_base)) { |
563 | ctx->mmap_size = 0; |
564 | aio_free_ring(ctx); |
565 | return -ENOMEM; |
566 | } |
567 | |
568 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
569 | |
570 | ctx->user_id = ctx->mmap_base; |
571 | ctx->nr_events = nr_events; /* trusted copy */ |
572 | |
573 | ring = page_address(ctx->ring_pages[0]); |
574 | ring->nr = nr_events; /* user copy */ |
575 | ring->id = ~0U; |
576 | ring->head = ring->tail = 0; |
577 | ring->magic = AIO_RING_MAGIC; |
578 | ring->compat_features = AIO_RING_COMPAT_FEATURES; |
579 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; |
580 | ring->header_length = sizeof(struct aio_ring); |
581 | flush_dcache_page(page: ctx->ring_pages[0]); |
582 | |
583 | return 0; |
584 | } |
585 | |
586 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
587 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) |
588 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) |
589 | |
590 | void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel) |
591 | { |
592 | struct aio_kiocb *req; |
593 | struct kioctx *ctx; |
594 | unsigned long flags; |
595 | |
596 | /* |
597 | * kiocb didn't come from aio or is neither a read nor a write, hence |
598 | * ignore it. |
599 | */ |
600 | if (!(iocb->ki_flags & IOCB_AIO_RW)) |
601 | return; |
602 | |
603 | req = container_of(iocb, struct aio_kiocb, rw); |
604 | |
605 | if (WARN_ON_ONCE(!list_empty(&req->ki_list))) |
606 | return; |
607 | |
608 | ctx = req->ki_ctx; |
609 | |
610 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
611 | list_add_tail(new: &req->ki_list, head: &ctx->active_reqs); |
612 | req->ki_cancel = cancel; |
613 | spin_unlock_irqrestore(lock: &ctx->ctx_lock, flags); |
614 | } |
615 | EXPORT_SYMBOL(kiocb_set_cancel_fn); |
616 | |
617 | /* |
618 | * free_ioctx() should be RCU delayed to synchronize against the RCU |
619 | * protected lookup_ioctx() and also needs process context to call |
620 | * aio_free_ring(). Use rcu_work. |
621 | */ |
622 | static void free_ioctx(struct work_struct *work) |
623 | { |
624 | struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, |
625 | free_rwork); |
626 | pr_debug("freeing %p\n", ctx); |
627 | |
628 | aio_free_ring(ctx); |
629 | free_percpu(pdata: ctx->cpu); |
630 | percpu_ref_exit(ref: &ctx->reqs); |
631 | percpu_ref_exit(ref: &ctx->users); |
632 | kmem_cache_free(s: kioctx_cachep, objp: ctx); |
633 | } |
634 | |
635 | static void free_ioctx_reqs(struct percpu_ref *ref) |
636 | { |
637 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
638 | |
639 | /* At this point we know that there are no any in-flight requests */ |
640 | if (ctx->rq_wait && atomic_dec_and_test(v: &ctx->rq_wait->count)) |
641 | complete(&ctx->rq_wait->comp); |
642 | |
643 | /* Synchronize against RCU protected table->table[] dereferences */ |
644 | INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); |
645 | queue_rcu_work(wq: system_wq, rwork: &ctx->free_rwork); |
646 | } |
647 | |
648 | /* |
649 | * When this function runs, the kioctx has been removed from the "hash table" |
650 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - |
651 | * now it's safe to cancel any that need to be. |
652 | */ |
653 | static void free_ioctx_users(struct percpu_ref *ref) |
654 | { |
655 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
656 | struct aio_kiocb *req; |
657 | |
658 | spin_lock_irq(lock: &ctx->ctx_lock); |
659 | |
660 | while (!list_empty(head: &ctx->active_reqs)) { |
661 | req = list_first_entry(&ctx->active_reqs, |
662 | struct aio_kiocb, ki_list); |
663 | req->ki_cancel(&req->rw); |
664 | list_del_init(entry: &req->ki_list); |
665 | } |
666 | |
667 | spin_unlock_irq(lock: &ctx->ctx_lock); |
668 | |
669 | percpu_ref_kill(ref: &ctx->reqs); |
670 | percpu_ref_put(ref: &ctx->reqs); |
671 | } |
672 | |
673 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
674 | { |
675 | unsigned i, new_nr; |
676 | struct kioctx_table *table, *old; |
677 | struct aio_ring *ring; |
678 | |
679 | spin_lock(lock: &mm->ioctx_lock); |
680 | table = rcu_dereference_raw(mm->ioctx_table); |
681 | |
682 | while (1) { |
683 | if (table) |
684 | for (i = 0; i < table->nr; i++) |
685 | if (!rcu_access_pointer(table->table[i])) { |
686 | ctx->id = i; |
687 | rcu_assign_pointer(table->table[i], ctx); |
688 | spin_unlock(lock: &mm->ioctx_lock); |
689 | |
690 | /* While kioctx setup is in progress, |
691 | * we are protected from page migration |
692 | * changes ring_pages by ->ring_lock. |
693 | */ |
694 | ring = page_address(ctx->ring_pages[0]); |
695 | ring->id = ctx->id; |
696 | return 0; |
697 | } |
698 | |
699 | new_nr = (table ? table->nr : 1) * 4; |
700 | spin_unlock(lock: &mm->ioctx_lock); |
701 | |
702 | table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL); |
703 | if (!table) |
704 | return -ENOMEM; |
705 | |
706 | table->nr = new_nr; |
707 | |
708 | spin_lock(lock: &mm->ioctx_lock); |
709 | old = rcu_dereference_raw(mm->ioctx_table); |
710 | |
711 | if (!old) { |
712 | rcu_assign_pointer(mm->ioctx_table, table); |
713 | } else if (table->nr > old->nr) { |
714 | memcpy(table->table, old->table, |
715 | old->nr * sizeof(struct kioctx *)); |
716 | |
717 | rcu_assign_pointer(mm->ioctx_table, table); |
718 | kfree_rcu(old, rcu); |
719 | } else { |
720 | kfree(objp: table); |
721 | table = old; |
722 | } |
723 | } |
724 | } |
725 | |
726 | static void aio_nr_sub(unsigned nr) |
727 | { |
728 | spin_lock(lock: &aio_nr_lock); |
729 | if (WARN_ON(aio_nr - nr > aio_nr)) |
730 | aio_nr = 0; |
731 | else |
732 | aio_nr -= nr; |
733 | spin_unlock(lock: &aio_nr_lock); |
734 | } |
735 | |
736 | /* ioctx_alloc |
737 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
738 | */ |
739 | static struct kioctx *ioctx_alloc(unsigned nr_events) |
740 | { |
741 | struct mm_struct *mm = current->mm; |
742 | struct kioctx *ctx; |
743 | int err = -ENOMEM; |
744 | |
745 | /* |
746 | * Store the original nr_events -- what userspace passed to io_setup(), |
747 | * for counting against the global limit -- before it changes. |
748 | */ |
749 | unsigned int max_reqs = nr_events; |
750 | |
751 | /* |
752 | * We keep track of the number of available ringbuffer slots, to prevent |
753 | * overflow (reqs_available), and we also use percpu counters for this. |
754 | * |
755 | * So since up to half the slots might be on other cpu's percpu counters |
756 | * and unavailable, double nr_events so userspace sees what they |
757 | * expected: additionally, we move req_batch slots to/from percpu |
758 | * counters at a time, so make sure that isn't 0: |
759 | */ |
760 | nr_events = max(nr_events, num_possible_cpus() * 4); |
761 | nr_events *= 2; |
762 | |
763 | /* Prevent overflows */ |
764 | if (nr_events > (0x10000000U / sizeof(struct io_event))) { |
765 | pr_debug("ENOMEM: nr_events too high\n"); |
766 | return ERR_PTR(error: -EINVAL); |
767 | } |
768 | |
769 | if (!nr_events || (unsigned long)max_reqs > aio_max_nr) |
770 | return ERR_PTR(error: -EAGAIN); |
771 | |
772 | ctx = kmem_cache_zalloc(k: kioctx_cachep, GFP_KERNEL); |
773 | if (!ctx) |
774 | return ERR_PTR(error: -ENOMEM); |
775 | |
776 | ctx->max_reqs = max_reqs; |
777 | |
778 | spin_lock_init(&ctx->ctx_lock); |
779 | spin_lock_init(&ctx->completion_lock); |
780 | mutex_init(&ctx->ring_lock); |
781 | /* Protect against page migration throughout kiotx setup by keeping |
782 | * the ring_lock mutex held until setup is complete. */ |
783 | mutex_lock(&ctx->ring_lock); |
784 | init_waitqueue_head(&ctx->wait); |
785 | |
786 | INIT_LIST_HEAD(list: &ctx->active_reqs); |
787 | |
788 | if (percpu_ref_init(ref: &ctx->users, release: free_ioctx_users, flags: 0, GFP_KERNEL)) |
789 | goto err; |
790 | |
791 | if (percpu_ref_init(ref: &ctx->reqs, release: free_ioctx_reqs, flags: 0, GFP_KERNEL)) |
792 | goto err; |
793 | |
794 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
795 | if (!ctx->cpu) |
796 | goto err; |
797 | |
798 | err = aio_setup_ring(ctx, nr_events); |
799 | if (err < 0) |
800 | goto err; |
801 | |
802 | atomic_set(v: &ctx->reqs_available, i: ctx->nr_events - 1); |
803 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); |
804 | if (ctx->req_batch < 1) |
805 | ctx->req_batch = 1; |
806 | |
807 | /* limit the number of system wide aios */ |
808 | spin_lock(lock: &aio_nr_lock); |
809 | if (aio_nr + ctx->max_reqs > aio_max_nr || |
810 | aio_nr + ctx->max_reqs < aio_nr) { |
811 | spin_unlock(lock: &aio_nr_lock); |
812 | err = -EAGAIN; |
813 | goto err_ctx; |
814 | } |
815 | aio_nr += ctx->max_reqs; |
816 | spin_unlock(lock: &aio_nr_lock); |
817 | |
818 | percpu_ref_get(ref: &ctx->users); /* io_setup() will drop this ref */ |
819 | percpu_ref_get(ref: &ctx->reqs); /* free_ioctx_users() will drop this */ |
820 | |
821 | err = ioctx_add_table(ctx, mm); |
822 | if (err) |
823 | goto err_cleanup; |
824 | |
825 | /* Release the ring_lock mutex now that all setup is complete. */ |
826 | mutex_unlock(lock: &ctx->ring_lock); |
827 | |
828 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
829 | ctx, ctx->user_id, mm, ctx->nr_events); |
830 | return ctx; |
831 | |
832 | err_cleanup: |
833 | aio_nr_sub(nr: ctx->max_reqs); |
834 | err_ctx: |
835 | atomic_set(v: &ctx->dead, i: 1); |
836 | if (ctx->mmap_size) |
837 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
838 | aio_free_ring(ctx); |
839 | err: |
840 | mutex_unlock(lock: &ctx->ring_lock); |
841 | free_percpu(pdata: ctx->cpu); |
842 | percpu_ref_exit(ref: &ctx->reqs); |
843 | percpu_ref_exit(ref: &ctx->users); |
844 | kmem_cache_free(s: kioctx_cachep, objp: ctx); |
845 | pr_debug("error allocating ioctx %d\n", err); |
846 | return ERR_PTR(error: err); |
847 | } |
848 | |
849 | /* kill_ioctx |
850 | * Cancels all outstanding aio requests on an aio context. Used |
851 | * when the processes owning a context have all exited to encourage |
852 | * the rapid destruction of the kioctx. |
853 | */ |
854 | static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, |
855 | struct ctx_rq_wait *wait) |
856 | { |
857 | struct kioctx_table *table; |
858 | |
859 | spin_lock(lock: &mm->ioctx_lock); |
860 | if (atomic_xchg(v: &ctx->dead, new: 1)) { |
861 | spin_unlock(lock: &mm->ioctx_lock); |
862 | return -EINVAL; |
863 | } |
864 | |
865 | table = rcu_dereference_raw(mm->ioctx_table); |
866 | WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); |
867 | RCU_INIT_POINTER(table->table[ctx->id], NULL); |
868 | spin_unlock(lock: &mm->ioctx_lock); |
869 | |
870 | /* free_ioctx_reqs() will do the necessary RCU synchronization */ |
871 | wake_up_all(&ctx->wait); |
872 | |
873 | /* |
874 | * It'd be more correct to do this in free_ioctx(), after all |
875 | * the outstanding kiocbs have finished - but by then io_destroy |
876 | * has already returned, so io_setup() could potentially return |
877 | * -EAGAIN with no ioctxs actually in use (as far as userspace |
878 | * could tell). |
879 | */ |
880 | aio_nr_sub(nr: ctx->max_reqs); |
881 | |
882 | if (ctx->mmap_size) |
883 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
884 | |
885 | ctx->rq_wait = wait; |
886 | percpu_ref_kill(ref: &ctx->users); |
887 | return 0; |
888 | } |
889 | |
890 | /* |
891 | * exit_aio: called when the last user of mm goes away. At this point, there is |
892 | * no way for any new requests to be submited or any of the io_* syscalls to be |
893 | * called on the context. |
894 | * |
895 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on |
896 | * them. |
897 | */ |
898 | void exit_aio(struct mm_struct *mm) |
899 | { |
900 | struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); |
901 | struct ctx_rq_wait wait; |
902 | int i, skipped; |
903 | |
904 | if (!table) |
905 | return; |
906 | |
907 | atomic_set(v: &wait.count, i: table->nr); |
908 | init_completion(x: &wait.comp); |
909 | |
910 | skipped = 0; |
911 | for (i = 0; i < table->nr; ++i) { |
912 | struct kioctx *ctx = |
913 | rcu_dereference_protected(table->table[i], true); |
914 | |
915 | if (!ctx) { |
916 | skipped++; |
917 | continue; |
918 | } |
919 | |
920 | /* |
921 | * We don't need to bother with munmap() here - exit_mmap(mm) |
922 | * is coming and it'll unmap everything. And we simply can't, |
923 | * this is not necessarily our ->mm. |
924 | * Since kill_ioctx() uses non-zero ->mmap_size as indicator |
925 | * that it needs to unmap the area, just set it to 0. |
926 | */ |
927 | ctx->mmap_size = 0; |
928 | kill_ioctx(mm, ctx, wait: &wait); |
929 | } |
930 | |
931 | if (!atomic_sub_and_test(i: skipped, v: &wait.count)) { |
932 | /* Wait until all IO for the context are done. */ |
933 | wait_for_completion(&wait.comp); |
934 | } |
935 | |
936 | RCU_INIT_POINTER(mm->ioctx_table, NULL); |
937 | kfree(objp: table); |
938 | } |
939 | |
940 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
941 | { |
942 | struct kioctx_cpu *kcpu; |
943 | unsigned long flags; |
944 | |
945 | local_irq_save(flags); |
946 | kcpu = this_cpu_ptr(ctx->cpu); |
947 | kcpu->reqs_available += nr; |
948 | |
949 | while (kcpu->reqs_available >= ctx->req_batch * 2) { |
950 | kcpu->reqs_available -= ctx->req_batch; |
951 | atomic_add(i: ctx->req_batch, v: &ctx->reqs_available); |
952 | } |
953 | |
954 | local_irq_restore(flags); |
955 | } |
956 | |
957 | static bool __get_reqs_available(struct kioctx *ctx) |
958 | { |
959 | struct kioctx_cpu *kcpu; |
960 | bool ret = false; |
961 | unsigned long flags; |
962 | |
963 | local_irq_save(flags); |
964 | kcpu = this_cpu_ptr(ctx->cpu); |
965 | if (!kcpu->reqs_available) { |
966 | int avail = atomic_read(v: &ctx->reqs_available); |
967 | |
968 | do { |
969 | if (avail < ctx->req_batch) |
970 | goto out; |
971 | } while (!atomic_try_cmpxchg(v: &ctx->reqs_available, |
972 | old: &avail, new: avail - ctx->req_batch)); |
973 | |
974 | kcpu->reqs_available += ctx->req_batch; |
975 | } |
976 | |
977 | ret = true; |
978 | kcpu->reqs_available--; |
979 | out: |
980 | local_irq_restore(flags); |
981 | return ret; |
982 | } |
983 | |
984 | /* refill_reqs_available |
985 | * Updates the reqs_available reference counts used for tracking the |
986 | * number of free slots in the completion ring. This can be called |
987 | * from aio_complete() (to optimistically update reqs_available) or |
988 | * from aio_get_req() (the we're out of events case). It must be |
989 | * called holding ctx->completion_lock. |
990 | */ |
991 | static void refill_reqs_available(struct kioctx *ctx, unsigned head, |
992 | unsigned tail) |
993 | { |
994 | unsigned events_in_ring, completed; |
995 | |
996 | /* Clamp head since userland can write to it. */ |
997 | head %= ctx->nr_events; |
998 | if (head <= tail) |
999 | events_in_ring = tail - head; |
1000 | else |
1001 | events_in_ring = ctx->nr_events - (head - tail); |
1002 | |
1003 | completed = ctx->completed_events; |
1004 | if (events_in_ring < completed) |
1005 | completed -= events_in_ring; |
1006 | else |
1007 | completed = 0; |
1008 | |
1009 | if (!completed) |
1010 | return; |
1011 | |
1012 | ctx->completed_events -= completed; |
1013 | put_reqs_available(ctx, nr: completed); |
1014 | } |
1015 | |
1016 | /* user_refill_reqs_available |
1017 | * Called to refill reqs_available when aio_get_req() encounters an |
1018 | * out of space in the completion ring. |
1019 | */ |
1020 | static void user_refill_reqs_available(struct kioctx *ctx) |
1021 | { |
1022 | spin_lock_irq(lock: &ctx->completion_lock); |
1023 | if (ctx->completed_events) { |
1024 | struct aio_ring *ring; |
1025 | unsigned head; |
1026 | |
1027 | /* Access of ring->head may race with aio_read_events_ring() |
1028 | * here, but that's okay since whether we read the old version |
1029 | * or the new version, and either will be valid. The important |
1030 | * part is that head cannot pass tail since we prevent |
1031 | * aio_complete() from updating tail by holding |
1032 | * ctx->completion_lock. Even if head is invalid, the check |
1033 | * against ctx->completed_events below will make sure we do the |
1034 | * safe/right thing. |
1035 | */ |
1036 | ring = page_address(ctx->ring_pages[0]); |
1037 | head = ring->head; |
1038 | |
1039 | refill_reqs_available(ctx, head, tail: ctx->tail); |
1040 | } |
1041 | |
1042 | spin_unlock_irq(lock: &ctx->completion_lock); |
1043 | } |
1044 | |
1045 | static bool get_reqs_available(struct kioctx *ctx) |
1046 | { |
1047 | if (__get_reqs_available(ctx)) |
1048 | return true; |
1049 | user_refill_reqs_available(ctx); |
1050 | return __get_reqs_available(ctx); |
1051 | } |
1052 | |
1053 | /* aio_get_req |
1054 | * Allocate a slot for an aio request. |
1055 | * Returns NULL if no requests are free. |
1056 | * |
1057 | * The refcount is initialized to 2 - one for the async op completion, |
1058 | * one for the synchronous code that does this. |
1059 | */ |
1060 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) |
1061 | { |
1062 | struct aio_kiocb *req; |
1063 | |
1064 | req = kmem_cache_alloc(cachep: kiocb_cachep, GFP_KERNEL); |
1065 | if (unlikely(!req)) |
1066 | return NULL; |
1067 | |
1068 | if (unlikely(!get_reqs_available(ctx))) { |
1069 | kmem_cache_free(s: kiocb_cachep, objp: req); |
1070 | return NULL; |
1071 | } |
1072 | |
1073 | percpu_ref_get(ref: &ctx->reqs); |
1074 | req->ki_ctx = ctx; |
1075 | INIT_LIST_HEAD(list: &req->ki_list); |
1076 | refcount_set(r: &req->ki_refcnt, n: 2); |
1077 | req->ki_eventfd = NULL; |
1078 | return req; |
1079 | } |
1080 | |
1081 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1082 | { |
1083 | struct aio_ring __user *ring = (void __user *)ctx_id; |
1084 | struct mm_struct *mm = current->mm; |
1085 | struct kioctx *ctx, *ret = NULL; |
1086 | struct kioctx_table *table; |
1087 | unsigned id; |
1088 | |
1089 | if (get_user(id, &ring->id)) |
1090 | return NULL; |
1091 | |
1092 | rcu_read_lock(); |
1093 | table = rcu_dereference(mm->ioctx_table); |
1094 | |
1095 | if (!table || id >= table->nr) |
1096 | goto out; |
1097 | |
1098 | id = array_index_nospec(id, table->nr); |
1099 | ctx = rcu_dereference(table->table[id]); |
1100 | if (ctx && ctx->user_id == ctx_id) { |
1101 | if (percpu_ref_tryget_live(ref: &ctx->users)) |
1102 | ret = ctx; |
1103 | } |
1104 | out: |
1105 | rcu_read_unlock(); |
1106 | return ret; |
1107 | } |
1108 | |
1109 | static inline void iocb_destroy(struct aio_kiocb *iocb) |
1110 | { |
1111 | if (iocb->ki_eventfd) |
1112 | eventfd_ctx_put(ctx: iocb->ki_eventfd); |
1113 | if (iocb->ki_filp) |
1114 | fput(iocb->ki_filp); |
1115 | percpu_ref_put(ref: &iocb->ki_ctx->reqs); |
1116 | kmem_cache_free(s: kiocb_cachep, objp: iocb); |
1117 | } |
1118 | |
1119 | struct aio_waiter { |
1120 | struct wait_queue_entry w; |
1121 | size_t min_nr; |
1122 | }; |
1123 | |
1124 | /* aio_complete |
1125 | * Called when the io request on the given iocb is complete. |
1126 | */ |
1127 | static void aio_complete(struct aio_kiocb *iocb) |
1128 | { |
1129 | struct kioctx *ctx = iocb->ki_ctx; |
1130 | struct aio_ring *ring; |
1131 | struct io_event *ev_page, *event; |
1132 | unsigned tail, pos, head, avail; |
1133 | unsigned long flags; |
1134 | |
1135 | /* |
1136 | * Add a completion event to the ring buffer. Must be done holding |
1137 | * ctx->completion_lock to prevent other code from messing with the tail |
1138 | * pointer since we might be called from irq context. |
1139 | */ |
1140 | spin_lock_irqsave(&ctx->completion_lock, flags); |
1141 | |
1142 | tail = ctx->tail; |
1143 | pos = tail + AIO_EVENTS_OFFSET; |
1144 | |
1145 | if (++tail >= ctx->nr_events) |
1146 | tail = 0; |
1147 | |
1148 | ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
1149 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
1150 | |
1151 | *event = iocb->ki_res; |
1152 | |
1153 | flush_dcache_page(page: ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
1154 | |
1155 | pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, |
1156 | (void __user *)(unsigned long)iocb->ki_res.obj, |
1157 | iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); |
1158 | |
1159 | /* after flagging the request as done, we |
1160 | * must never even look at it again |
1161 | */ |
1162 | smp_wmb(); /* make event visible before updating tail */ |
1163 | |
1164 | ctx->tail = tail; |
1165 | |
1166 | ring = page_address(ctx->ring_pages[0]); |
1167 | head = ring->head; |
1168 | ring->tail = tail; |
1169 | flush_dcache_page(page: ctx->ring_pages[0]); |
1170 | |
1171 | ctx->completed_events++; |
1172 | if (ctx->completed_events > 1) |
1173 | refill_reqs_available(ctx, head, tail); |
1174 | |
1175 | avail = tail > head |
1176 | ? tail - head |
1177 | : tail + ctx->nr_events - head; |
1178 | spin_unlock_irqrestore(lock: &ctx->completion_lock, flags); |
1179 | |
1180 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
1181 | |
1182 | /* |
1183 | * Check if the user asked us to deliver the result through an |
1184 | * eventfd. The eventfd_signal() function is safe to be called |
1185 | * from IRQ context. |
1186 | */ |
1187 | if (iocb->ki_eventfd) |
1188 | eventfd_signal(ctx: iocb->ki_eventfd); |
1189 | |
1190 | /* |
1191 | * We have to order our ring_info tail store above and test |
1192 | * of the wait list below outside the wait lock. This is |
1193 | * like in wake_up_bit() where clearing a bit has to be |
1194 | * ordered with the unlocked test. |
1195 | */ |
1196 | smp_mb(); |
1197 | |
1198 | if (waitqueue_active(wq_head: &ctx->wait)) { |
1199 | struct aio_waiter *curr, *next; |
1200 | unsigned long flags; |
1201 | |
1202 | spin_lock_irqsave(&ctx->wait.lock, flags); |
1203 | list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry) |
1204 | if (avail >= curr->min_nr) { |
1205 | wake_up_process(tsk: curr->w.private); |
1206 | list_del_init_careful(entry: &curr->w.entry); |
1207 | } |
1208 | spin_unlock_irqrestore(lock: &ctx->wait.lock, flags); |
1209 | } |
1210 | } |
1211 | |
1212 | static inline void iocb_put(struct aio_kiocb *iocb) |
1213 | { |
1214 | if (refcount_dec_and_test(r: &iocb->ki_refcnt)) { |
1215 | aio_complete(iocb); |
1216 | iocb_destroy(iocb); |
1217 | } |
1218 | } |
1219 | |
1220 | /* aio_read_events_ring |
1221 | * Pull an event off of the ioctx's event ring. Returns the number of |
1222 | * events fetched |
1223 | */ |
1224 | static long aio_read_events_ring(struct kioctx *ctx, |
1225 | struct io_event __user *event, long nr) |
1226 | { |
1227 | struct aio_ring *ring; |
1228 | unsigned head, tail, pos; |
1229 | long ret = 0; |
1230 | int copy_ret; |
1231 | |
1232 | /* |
1233 | * The mutex can block and wake us up and that will cause |
1234 | * wait_event_interruptible_hrtimeout() to schedule without sleeping |
1235 | * and repeat. This should be rare enough that it doesn't cause |
1236 | * peformance issues. See the comment in read_events() for more detail. |
1237 | */ |
1238 | sched_annotate_sleep(); |
1239 | mutex_lock(&ctx->ring_lock); |
1240 | |
1241 | /* Access to ->ring_pages here is protected by ctx->ring_lock. */ |
1242 | ring = page_address(ctx->ring_pages[0]); |
1243 | head = ring->head; |
1244 | tail = ring->tail; |
1245 | |
1246 | /* |
1247 | * Ensure that once we've read the current tail pointer, that |
1248 | * we also see the events that were stored up to the tail. |
1249 | */ |
1250 | smp_rmb(); |
1251 | |
1252 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1253 | |
1254 | if (head == tail) |
1255 | goto out; |
1256 | |
1257 | head %= ctx->nr_events; |
1258 | tail %= ctx->nr_events; |
1259 | |
1260 | while (ret < nr) { |
1261 | long avail; |
1262 | struct io_event *ev; |
1263 | struct page *page; |
1264 | |
1265 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
1266 | if (head == tail) |
1267 | break; |
1268 | |
1269 | pos = head + AIO_EVENTS_OFFSET; |
1270 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
1271 | pos %= AIO_EVENTS_PER_PAGE; |
1272 | |
1273 | avail = min(avail, nr - ret); |
1274 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); |
1275 | |
1276 | ev = page_address(page); |
1277 | copy_ret = copy_to_user(to: event + ret, from: ev + pos, |
1278 | n: sizeof(*ev) * avail); |
1279 | |
1280 | if (unlikely(copy_ret)) { |
1281 | ret = -EFAULT; |
1282 | goto out; |
1283 | } |
1284 | |
1285 | ret += avail; |
1286 | head += avail; |
1287 | head %= ctx->nr_events; |
1288 | } |
1289 | |
1290 | ring = page_address(ctx->ring_pages[0]); |
1291 | ring->head = head; |
1292 | flush_dcache_page(page: ctx->ring_pages[0]); |
1293 | |
1294 | pr_debug("%li h%u t%u\n", ret, head, tail); |
1295 | out: |
1296 | mutex_unlock(lock: &ctx->ring_lock); |
1297 | |
1298 | return ret; |
1299 | } |
1300 | |
1301 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
1302 | struct io_event __user *event, long *i) |
1303 | { |
1304 | long ret = aio_read_events_ring(ctx, event: event + *i, nr: nr - *i); |
1305 | |
1306 | if (ret > 0) |
1307 | *i += ret; |
1308 | |
1309 | if (unlikely(atomic_read(&ctx->dead))) |
1310 | ret = -EINVAL; |
1311 | |
1312 | if (!*i) |
1313 | *i = ret; |
1314 | |
1315 | return ret < 0 || *i >= min_nr; |
1316 | } |
1317 | |
1318 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1319 | struct io_event __user *event, |
1320 | ktime_t until) |
1321 | { |
1322 | struct hrtimer_sleeper t; |
1323 | struct aio_waiter w; |
1324 | long ret = 0, ret2 = 0; |
1325 | |
1326 | /* |
1327 | * Note that aio_read_events() is being called as the conditional - i.e. |
1328 | * we're calling it after prepare_to_wait() has set task state to |
1329 | * TASK_INTERRUPTIBLE. |
1330 | * |
1331 | * But aio_read_events() can block, and if it blocks it's going to flip |
1332 | * the task state back to TASK_RUNNING. |
1333 | * |
1334 | * This should be ok, provided it doesn't flip the state back to |
1335 | * TASK_RUNNING and return 0 too much - that causes us to spin. That |
1336 | * will only happen if the mutex_lock() call blocks, and we then find |
1337 | * the ringbuffer empty. So in practice we should be ok, but it's |
1338 | * something to be aware of when touching this code. |
1339 | */ |
1340 | aio_read_events(ctx, min_nr, nr, event, i: &ret); |
1341 | if (until == 0 || ret < 0 || ret >= min_nr) |
1342 | return ret; |
1343 | |
1344 | hrtimer_init_sleeper_on_stack(sl: &t, CLOCK_MONOTONIC, mode: HRTIMER_MODE_REL); |
1345 | if (until != KTIME_MAX) { |
1346 | hrtimer_set_expires_range_ns(timer: &t.timer, time: until, current->timer_slack_ns); |
1347 | hrtimer_sleeper_start_expires(sl: &t, mode: HRTIMER_MODE_REL); |
1348 | } |
1349 | |
1350 | init_wait(&w.w); |
1351 | |
1352 | while (1) { |
1353 | unsigned long nr_got = ret; |
1354 | |
1355 | w.min_nr = min_nr - ret; |
1356 | |
1357 | ret2 = prepare_to_wait_event(wq_head: &ctx->wait, wq_entry: &w.w, TASK_INTERRUPTIBLE); |
1358 | if (!ret2 && !t.task) |
1359 | ret2 = -ETIME; |
1360 | |
1361 | if (aio_read_events(ctx, min_nr, nr, event, i: &ret) || ret2) |
1362 | break; |
1363 | |
1364 | if (nr_got == ret) |
1365 | schedule(); |
1366 | } |
1367 | |
1368 | finish_wait(wq_head: &ctx->wait, wq_entry: &w.w); |
1369 | hrtimer_cancel(timer: &t.timer); |
1370 | destroy_hrtimer_on_stack(timer: &t.timer); |
1371 | |
1372 | return ret; |
1373 | } |
1374 | |
1375 | /* sys_io_setup: |
1376 | * Create an aio_context capable of receiving at least nr_events. |
1377 | * ctxp must not point to an aio_context that already exists, and |
1378 | * must be initialized to 0 prior to the call. On successful |
1379 | * creation of the aio_context, *ctxp is filled in with the resulting |
1380 | * handle. May fail with -EINVAL if *ctxp is not initialized, |
1381 | * if the specified nr_events exceeds internal limits. May fail |
1382 | * with -EAGAIN if the specified nr_events exceeds the user's limit |
1383 | * of available events. May fail with -ENOMEM if insufficient kernel |
1384 | * resources are available. May fail with -EFAULT if an invalid |
1385 | * pointer is passed for ctxp. Will fail with -ENOSYS if not |
1386 | * implemented. |
1387 | */ |
1388 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1389 | { |
1390 | struct kioctx *ioctx = NULL; |
1391 | unsigned long ctx; |
1392 | long ret; |
1393 | |
1394 | ret = get_user(ctx, ctxp); |
1395 | if (unlikely(ret)) |
1396 | goto out; |
1397 | |
1398 | ret = -EINVAL; |
1399 | if (unlikely(ctx || nr_events == 0)) { |
1400 | pr_debug("EINVAL: ctx %lu nr_events %u\n", |
1401 | ctx, nr_events); |
1402 | goto out; |
1403 | } |
1404 | |
1405 | ioctx = ioctx_alloc(nr_events); |
1406 | ret = PTR_ERR(ptr: ioctx); |
1407 | if (!IS_ERR(ptr: ioctx)) { |
1408 | ret = put_user(ioctx->user_id, ctxp); |
1409 | if (ret) |
1410 | kill_ioctx(current->mm, ctx: ioctx, NULL); |
1411 | percpu_ref_put(ref: &ioctx->users); |
1412 | } |
1413 | |
1414 | out: |
1415 | return ret; |
1416 | } |
1417 | |
1418 | #ifdef CONFIG_COMPAT |
1419 | COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) |
1420 | { |
1421 | struct kioctx *ioctx = NULL; |
1422 | unsigned long ctx; |
1423 | long ret; |
1424 | |
1425 | ret = get_user(ctx, ctx32p); |
1426 | if (unlikely(ret)) |
1427 | goto out; |
1428 | |
1429 | ret = -EINVAL; |
1430 | if (unlikely(ctx || nr_events == 0)) { |
1431 | pr_debug("EINVAL: ctx %lu nr_events %u\n", |
1432 | ctx, nr_events); |
1433 | goto out; |
1434 | } |
1435 | |
1436 | ioctx = ioctx_alloc(nr_events); |
1437 | ret = PTR_ERR(ptr: ioctx); |
1438 | if (!IS_ERR(ptr: ioctx)) { |
1439 | /* truncating is ok because it's a user address */ |
1440 | ret = put_user((u32)ioctx->user_id, ctx32p); |
1441 | if (ret) |
1442 | kill_ioctx(current->mm, ctx: ioctx, NULL); |
1443 | percpu_ref_put(ref: &ioctx->users); |
1444 | } |
1445 | |
1446 | out: |
1447 | return ret; |
1448 | } |
1449 | #endif |
1450 | |
1451 | /* sys_io_destroy: |
1452 | * Destroy the aio_context specified. May cancel any outstanding |
1453 | * AIOs and block on completion. Will fail with -ENOSYS if not |
1454 | * implemented. May fail with -EINVAL if the context pointed to |
1455 | * is invalid. |
1456 | */ |
1457 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1458 | { |
1459 | struct kioctx *ioctx = lookup_ioctx(ctx_id: ctx); |
1460 | if (likely(NULL != ioctx)) { |
1461 | struct ctx_rq_wait wait; |
1462 | int ret; |
1463 | |
1464 | init_completion(x: &wait.comp); |
1465 | atomic_set(v: &wait.count, i: 1); |
1466 | |
1467 | /* Pass requests_done to kill_ioctx() where it can be set |
1468 | * in a thread-safe way. If we try to set it here then we have |
1469 | * a race condition if two io_destroy() called simultaneously. |
1470 | */ |
1471 | ret = kill_ioctx(current->mm, ctx: ioctx, wait: &wait); |
1472 | percpu_ref_put(ref: &ioctx->users); |
1473 | |
1474 | /* Wait until all IO for the context are done. Otherwise kernel |
1475 | * keep using user-space buffers even if user thinks the context |
1476 | * is destroyed. |
1477 | */ |
1478 | if (!ret) |
1479 | wait_for_completion(&wait.comp); |
1480 | |
1481 | return ret; |
1482 | } |
1483 | pr_debug("EINVAL: invalid context id\n"); |
1484 | return -EINVAL; |
1485 | } |
1486 | |
1487 | static void aio_remove_iocb(struct aio_kiocb *iocb) |
1488 | { |
1489 | struct kioctx *ctx = iocb->ki_ctx; |
1490 | unsigned long flags; |
1491 | |
1492 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
1493 | list_del(entry: &iocb->ki_list); |
1494 | spin_unlock_irqrestore(lock: &ctx->ctx_lock, flags); |
1495 | } |
1496 | |
1497 | static void aio_complete_rw(struct kiocb *kiocb, long res) |
1498 | { |
1499 | struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); |
1500 | |
1501 | if (!list_empty_careful(head: &iocb->ki_list)) |
1502 | aio_remove_iocb(iocb); |
1503 | |
1504 | if (kiocb->ki_flags & IOCB_WRITE) { |
1505 | struct inode *inode = file_inode(f: kiocb->ki_filp); |
1506 | |
1507 | if (S_ISREG(inode->i_mode)) |
1508 | kiocb_end_write(iocb: kiocb); |
1509 | } |
1510 | |
1511 | iocb->ki_res.res = res; |
1512 | iocb->ki_res.res2 = 0; |
1513 | iocb_put(iocb); |
1514 | } |
1515 | |
1516 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) |
1517 | { |
1518 | int ret; |
1519 | |
1520 | req->ki_complete = aio_complete_rw; |
1521 | req->private = NULL; |
1522 | req->ki_pos = iocb->aio_offset; |
1523 | req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW; |
1524 | if (iocb->aio_flags & IOCB_FLAG_RESFD) |
1525 | req->ki_flags |= IOCB_EVENTFD; |
1526 | if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { |
1527 | /* |
1528 | * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then |
1529 | * aio_reqprio is interpreted as an I/O scheduling |
1530 | * class and priority. |
1531 | */ |
1532 | ret = ioprio_check_cap(ioprio: iocb->aio_reqprio); |
1533 | if (ret) { |
1534 | pr_debug("aio ioprio check cap error: %d\n", ret); |
1535 | return ret; |
1536 | } |
1537 | |
1538 | req->ki_ioprio = iocb->aio_reqprio; |
1539 | } else |
1540 | req->ki_ioprio = get_current_ioprio(); |
1541 | |
1542 | ret = kiocb_set_rw_flags(ki: req, flags: iocb->aio_rw_flags); |
1543 | if (unlikely(ret)) |
1544 | return ret; |
1545 | |
1546 | req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ |
1547 | return 0; |
1548 | } |
1549 | |
1550 | static ssize_t aio_setup_rw(int rw, const struct iocb *iocb, |
1551 | struct iovec **iovec, bool vectored, bool compat, |
1552 | struct iov_iter *iter) |
1553 | { |
1554 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; |
1555 | size_t len = iocb->aio_nbytes; |
1556 | |
1557 | if (!vectored) { |
1558 | ssize_t ret = import_ubuf(type: rw, buf, len, i: iter); |
1559 | *iovec = NULL; |
1560 | return ret; |
1561 | } |
1562 | |
1563 | return __import_iovec(type: rw, uvec: buf, nr_segs: len, UIO_FASTIOV, iovp: iovec, i: iter, compat); |
1564 | } |
1565 | |
1566 | static inline void aio_rw_done(struct kiocb *req, ssize_t ret) |
1567 | { |
1568 | switch (ret) { |
1569 | case -EIOCBQUEUED: |
1570 | break; |
1571 | case -ERESTARTSYS: |
1572 | case -ERESTARTNOINTR: |
1573 | case -ERESTARTNOHAND: |
1574 | case -ERESTART_RESTARTBLOCK: |
1575 | /* |
1576 | * There's no easy way to restart the syscall since other AIO's |
1577 | * may be already running. Just fail this IO with EINTR. |
1578 | */ |
1579 | ret = -EINTR; |
1580 | fallthrough; |
1581 | default: |
1582 | req->ki_complete(req, ret); |
1583 | } |
1584 | } |
1585 | |
1586 | static int aio_read(struct kiocb *req, const struct iocb *iocb, |
1587 | bool vectored, bool compat) |
1588 | { |
1589 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1590 | struct iov_iter iter; |
1591 | struct file *file; |
1592 | int ret; |
1593 | |
1594 | ret = aio_prep_rw(req, iocb); |
1595 | if (ret) |
1596 | return ret; |
1597 | file = req->ki_filp; |
1598 | if (unlikely(!(file->f_mode & FMODE_READ))) |
1599 | return -EBADF; |
1600 | if (unlikely(!file->f_op->read_iter)) |
1601 | return -EINVAL; |
1602 | |
1603 | ret = aio_setup_rw(ITER_DEST, iocb, iovec: &iovec, vectored, compat, iter: &iter); |
1604 | if (ret < 0) |
1605 | return ret; |
1606 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(i: &iter)); |
1607 | if (!ret) |
1608 | aio_rw_done(req, ret: call_read_iter(file, kio: req, iter: &iter)); |
1609 | kfree(objp: iovec); |
1610 | return ret; |
1611 | } |
1612 | |
1613 | static int aio_write(struct kiocb *req, const struct iocb *iocb, |
1614 | bool vectored, bool compat) |
1615 | { |
1616 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1617 | struct iov_iter iter; |
1618 | struct file *file; |
1619 | int ret; |
1620 | |
1621 | ret = aio_prep_rw(req, iocb); |
1622 | if (ret) |
1623 | return ret; |
1624 | file = req->ki_filp; |
1625 | |
1626 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
1627 | return -EBADF; |
1628 | if (unlikely(!file->f_op->write_iter)) |
1629 | return -EINVAL; |
1630 | |
1631 | ret = aio_setup_rw(ITER_SOURCE, iocb, iovec: &iovec, vectored, compat, iter: &iter); |
1632 | if (ret < 0) |
1633 | return ret; |
1634 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(i: &iter)); |
1635 | if (!ret) { |
1636 | if (S_ISREG(file_inode(file)->i_mode)) |
1637 | kiocb_start_write(iocb: req); |
1638 | req->ki_flags |= IOCB_WRITE; |
1639 | aio_rw_done(req, ret: call_write_iter(file, kio: req, iter: &iter)); |
1640 | } |
1641 | kfree(objp: iovec); |
1642 | return ret; |
1643 | } |
1644 | |
1645 | static void aio_fsync_work(struct work_struct *work) |
1646 | { |
1647 | struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); |
1648 | const struct cred *old_cred = override_creds(iocb->fsync.creds); |
1649 | |
1650 | iocb->ki_res.res = vfs_fsync(file: iocb->fsync.file, datasync: iocb->fsync.datasync); |
1651 | revert_creds(old_cred); |
1652 | put_cred(cred: iocb->fsync.creds); |
1653 | iocb_put(iocb); |
1654 | } |
1655 | |
1656 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, |
1657 | bool datasync) |
1658 | { |
1659 | if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || |
1660 | iocb->aio_rw_flags)) |
1661 | return -EINVAL; |
1662 | |
1663 | if (unlikely(!req->file->f_op->fsync)) |
1664 | return -EINVAL; |
1665 | |
1666 | req->creds = prepare_creds(); |
1667 | if (!req->creds) |
1668 | return -ENOMEM; |
1669 | |
1670 | req->datasync = datasync; |
1671 | INIT_WORK(&req->work, aio_fsync_work); |
1672 | schedule_work(work: &req->work); |
1673 | return 0; |
1674 | } |
1675 | |
1676 | static void aio_poll_put_work(struct work_struct *work) |
1677 | { |
1678 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); |
1679 | struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); |
1680 | |
1681 | iocb_put(iocb); |
1682 | } |
1683 | |
1684 | /* |
1685 | * Safely lock the waitqueue which the request is on, synchronizing with the |
1686 | * case where the ->poll() provider decides to free its waitqueue early. |
1687 | * |
1688 | * Returns true on success, meaning that req->head->lock was locked, req->wait |
1689 | * is on req->head, and an RCU read lock was taken. Returns false if the |
1690 | * request was already removed from its waitqueue (which might no longer exist). |
1691 | */ |
1692 | static bool poll_iocb_lock_wq(struct poll_iocb *req) |
1693 | { |
1694 | wait_queue_head_t *head; |
1695 | |
1696 | /* |
1697 | * While we hold the waitqueue lock and the waitqueue is nonempty, |
1698 | * wake_up_pollfree() will wait for us. However, taking the waitqueue |
1699 | * lock in the first place can race with the waitqueue being freed. |
1700 | * |
1701 | * We solve this as eventpoll does: by taking advantage of the fact that |
1702 | * all users of wake_up_pollfree() will RCU-delay the actual free. If |
1703 | * we enter rcu_read_lock() and see that the pointer to the queue is |
1704 | * non-NULL, we can then lock it without the memory being freed out from |
1705 | * under us, then check whether the request is still on the queue. |
1706 | * |
1707 | * Keep holding rcu_read_lock() as long as we hold the queue lock, in |
1708 | * case the caller deletes the entry from the queue, leaving it empty. |
1709 | * In that case, only RCU prevents the queue memory from being freed. |
1710 | */ |
1711 | rcu_read_lock(); |
1712 | head = smp_load_acquire(&req->head); |
1713 | if (head) { |
1714 | spin_lock(lock: &head->lock); |
1715 | if (!list_empty(head: &req->wait.entry)) |
1716 | return true; |
1717 | spin_unlock(lock: &head->lock); |
1718 | } |
1719 | rcu_read_unlock(); |
1720 | return false; |
1721 | } |
1722 | |
1723 | static void poll_iocb_unlock_wq(struct poll_iocb *req) |
1724 | { |
1725 | spin_unlock(lock: &req->head->lock); |
1726 | rcu_read_unlock(); |
1727 | } |
1728 | |
1729 | static void aio_poll_complete_work(struct work_struct *work) |
1730 | { |
1731 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); |
1732 | struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); |
1733 | struct poll_table_struct pt = { ._key = req->events }; |
1734 | struct kioctx *ctx = iocb->ki_ctx; |
1735 | __poll_t mask = 0; |
1736 | |
1737 | if (!READ_ONCE(req->cancelled)) |
1738 | mask = vfs_poll(file: req->file, pt: &pt) & req->events; |
1739 | |
1740 | /* |
1741 | * Note that ->ki_cancel callers also delete iocb from active_reqs after |
1742 | * calling ->ki_cancel. We need the ctx_lock roundtrip here to |
1743 | * synchronize with them. In the cancellation case the list_del_init |
1744 | * itself is not actually needed, but harmless so we keep it in to |
1745 | * avoid further branches in the fast path. |
1746 | */ |
1747 | spin_lock_irq(lock: &ctx->ctx_lock); |
1748 | if (poll_iocb_lock_wq(req)) { |
1749 | if (!mask && !READ_ONCE(req->cancelled)) { |
1750 | /* |
1751 | * The request isn't actually ready to be completed yet. |
1752 | * Reschedule completion if another wakeup came in. |
1753 | */ |
1754 | if (req->work_need_resched) { |
1755 | schedule_work(work: &req->work); |
1756 | req->work_need_resched = false; |
1757 | } else { |
1758 | req->work_scheduled = false; |
1759 | } |
1760 | poll_iocb_unlock_wq(req); |
1761 | spin_unlock_irq(lock: &ctx->ctx_lock); |
1762 | return; |
1763 | } |
1764 | list_del_init(entry: &req->wait.entry); |
1765 | poll_iocb_unlock_wq(req); |
1766 | } /* else, POLLFREE has freed the waitqueue, so we must complete */ |
1767 | list_del_init(entry: &iocb->ki_list); |
1768 | iocb->ki_res.res = mangle_poll(val: mask); |
1769 | spin_unlock_irq(lock: &ctx->ctx_lock); |
1770 | |
1771 | iocb_put(iocb); |
1772 | } |
1773 | |
1774 | /* assumes we are called with irqs disabled */ |
1775 | static int aio_poll_cancel(struct kiocb *iocb) |
1776 | { |
1777 | struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); |
1778 | struct poll_iocb *req = &aiocb->poll; |
1779 | |
1780 | if (poll_iocb_lock_wq(req)) { |
1781 | WRITE_ONCE(req->cancelled, true); |
1782 | if (!req->work_scheduled) { |
1783 | schedule_work(work: &aiocb->poll.work); |
1784 | req->work_scheduled = true; |
1785 | } |
1786 | poll_iocb_unlock_wq(req); |
1787 | } /* else, the request was force-cancelled by POLLFREE already */ |
1788 | |
1789 | return 0; |
1790 | } |
1791 | |
1792 | static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
1793 | void *key) |
1794 | { |
1795 | struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); |
1796 | struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); |
1797 | __poll_t mask = key_to_poll(key); |
1798 | unsigned long flags; |
1799 | |
1800 | /* for instances that support it check for an event match first: */ |
1801 | if (mask && !(mask & req->events)) |
1802 | return 0; |
1803 | |
1804 | /* |
1805 | * Complete the request inline if possible. This requires that three |
1806 | * conditions be met: |
1807 | * 1. An event mask must have been passed. If a plain wakeup was done |
1808 | * instead, then mask == 0 and we have to call vfs_poll() to get |
1809 | * the events, so inline completion isn't possible. |
1810 | * 2. The completion work must not have already been scheduled. |
1811 | * 3. ctx_lock must not be busy. We have to use trylock because we |
1812 | * already hold the waitqueue lock, so this inverts the normal |
1813 | * locking order. Use irqsave/irqrestore because not all |
1814 | * filesystems (e.g. fuse) call this function with IRQs disabled, |
1815 | * yet IRQs have to be disabled before ctx_lock is obtained. |
1816 | */ |
1817 | if (mask && !req->work_scheduled && |
1818 | spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { |
1819 | struct kioctx *ctx = iocb->ki_ctx; |
1820 | |
1821 | list_del_init(entry: &req->wait.entry); |
1822 | list_del(entry: &iocb->ki_list); |
1823 | iocb->ki_res.res = mangle_poll(val: mask); |
1824 | if (iocb->ki_eventfd && !eventfd_signal_allowed()) { |
1825 | iocb = NULL; |
1826 | INIT_WORK(&req->work, aio_poll_put_work); |
1827 | schedule_work(work: &req->work); |
1828 | } |
1829 | spin_unlock_irqrestore(lock: &ctx->ctx_lock, flags); |
1830 | if (iocb) |
1831 | iocb_put(iocb); |
1832 | } else { |
1833 | /* |
1834 | * Schedule the completion work if needed. If it was already |
1835 | * scheduled, record that another wakeup came in. |
1836 | * |
1837 | * Don't remove the request from the waitqueue here, as it might |
1838 | * not actually be complete yet (we won't know until vfs_poll() |
1839 | * is called), and we must not miss any wakeups. POLLFREE is an |
1840 | * exception to this; see below. |
1841 | */ |
1842 | if (req->work_scheduled) { |
1843 | req->work_need_resched = true; |
1844 | } else { |
1845 | schedule_work(work: &req->work); |
1846 | req->work_scheduled = true; |
1847 | } |
1848 | |
1849 | /* |
1850 | * If the waitqueue is being freed early but we can't complete |
1851 | * the request inline, we have to tear down the request as best |
1852 | * we can. That means immediately removing the request from its |
1853 | * waitqueue and preventing all further accesses to the |
1854 | * waitqueue via the request. We also need to schedule the |
1855 | * completion work (done above). Also mark the request as |
1856 | * cancelled, to potentially skip an unneeded call to ->poll(). |
1857 | */ |
1858 | if (mask & POLLFREE) { |
1859 | WRITE_ONCE(req->cancelled, true); |
1860 | list_del_init(entry: &req->wait.entry); |
1861 | |
1862 | /* |
1863 | * Careful: this *must* be the last step, since as soon |
1864 | * as req->head is NULL'ed out, the request can be |
1865 | * completed and freed, since aio_poll_complete_work() |
1866 | * will no longer need to take the waitqueue lock. |
1867 | */ |
1868 | smp_store_release(&req->head, NULL); |
1869 | } |
1870 | } |
1871 | return 1; |
1872 | } |
1873 | |
1874 | struct aio_poll_table { |
1875 | struct poll_table_struct pt; |
1876 | struct aio_kiocb *iocb; |
1877 | bool queued; |
1878 | int error; |
1879 | }; |
1880 | |
1881 | static void |
1882 | aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, |
1883 | struct poll_table_struct *p) |
1884 | { |
1885 | struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); |
1886 | |
1887 | /* multiple wait queues per file are not supported */ |
1888 | if (unlikely(pt->queued)) { |
1889 | pt->error = -EINVAL; |
1890 | return; |
1891 | } |
1892 | |
1893 | pt->queued = true; |
1894 | pt->error = 0; |
1895 | pt->iocb->poll.head = head; |
1896 | add_wait_queue(wq_head: head, wq_entry: &pt->iocb->poll.wait); |
1897 | } |
1898 | |
1899 | static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
1900 | { |
1901 | struct kioctx *ctx = aiocb->ki_ctx; |
1902 | struct poll_iocb *req = &aiocb->poll; |
1903 | struct aio_poll_table apt; |
1904 | bool cancel = false; |
1905 | __poll_t mask; |
1906 | |
1907 | /* reject any unknown events outside the normal event mask. */ |
1908 | if ((u16)iocb->aio_buf != iocb->aio_buf) |
1909 | return -EINVAL; |
1910 | /* reject fields that are not defined for poll */ |
1911 | if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) |
1912 | return -EINVAL; |
1913 | |
1914 | INIT_WORK(&req->work, aio_poll_complete_work); |
1915 | req->events = demangle_poll(val: iocb->aio_buf) | EPOLLERR | EPOLLHUP; |
1916 | |
1917 | req->head = NULL; |
1918 | req->cancelled = false; |
1919 | req->work_scheduled = false; |
1920 | req->work_need_resched = false; |
1921 | |
1922 | apt.pt._qproc = aio_poll_queue_proc; |
1923 | apt.pt._key = req->events; |
1924 | apt.iocb = aiocb; |
1925 | apt.queued = false; |
1926 | apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ |
1927 | |
1928 | /* initialized the list so that we can do list_empty checks */ |
1929 | INIT_LIST_HEAD(list: &req->wait.entry); |
1930 | init_waitqueue_func_entry(wq_entry: &req->wait, func: aio_poll_wake); |
1931 | |
1932 | mask = vfs_poll(file: req->file, pt: &apt.pt) & req->events; |
1933 | spin_lock_irq(lock: &ctx->ctx_lock); |
1934 | if (likely(apt.queued)) { |
1935 | bool on_queue = poll_iocb_lock_wq(req); |
1936 | |
1937 | if (!on_queue || req->work_scheduled) { |
1938 | /* |
1939 | * aio_poll_wake() already either scheduled the async |
1940 | * completion work, or completed the request inline. |
1941 | */ |
1942 | if (apt.error) /* unsupported case: multiple queues */ |
1943 | cancel = true; |
1944 | apt.error = 0; |
1945 | mask = 0; |
1946 | } |
1947 | if (mask || apt.error) { |
1948 | /* Steal to complete synchronously. */ |
1949 | list_del_init(entry: &req->wait.entry); |
1950 | } else if (cancel) { |
1951 | /* Cancel if possible (may be too late though). */ |
1952 | WRITE_ONCE(req->cancelled, true); |
1953 | } else if (on_queue) { |
1954 | /* |
1955 | * Actually waiting for an event, so add the request to |
1956 | * active_reqs so that it can be cancelled if needed. |
1957 | */ |
1958 | list_add_tail(new: &aiocb->ki_list, head: &ctx->active_reqs); |
1959 | aiocb->ki_cancel = aio_poll_cancel; |
1960 | } |
1961 | if (on_queue) |
1962 | poll_iocb_unlock_wq(req); |
1963 | } |
1964 | if (mask) { /* no async, we'd stolen it */ |
1965 | aiocb->ki_res.res = mangle_poll(val: mask); |
1966 | apt.error = 0; |
1967 | } |
1968 | spin_unlock_irq(lock: &ctx->ctx_lock); |
1969 | if (mask) |
1970 | iocb_put(iocb: aiocb); |
1971 | return apt.error; |
1972 | } |
1973 | |
1974 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, |
1975 | struct iocb __user *user_iocb, struct aio_kiocb *req, |
1976 | bool compat) |
1977 | { |
1978 | req->ki_filp = fget(fd: iocb->aio_fildes); |
1979 | if (unlikely(!req->ki_filp)) |
1980 | return -EBADF; |
1981 | |
1982 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1983 | struct eventfd_ctx *eventfd; |
1984 | /* |
1985 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an |
1986 | * instance of the file* now. The file descriptor must be |
1987 | * an eventfd() fd, and will be signaled for each completed |
1988 | * event using the eventfd_signal() function. |
1989 | */ |
1990 | eventfd = eventfd_ctx_fdget(fd: iocb->aio_resfd); |
1991 | if (IS_ERR(ptr: eventfd)) |
1992 | return PTR_ERR(ptr: eventfd); |
1993 | |
1994 | req->ki_eventfd = eventfd; |
1995 | } |
1996 | |
1997 | if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { |
1998 | pr_debug("EFAULT: aio_key\n"); |
1999 | return -EFAULT; |
2000 | } |
2001 | |
2002 | req->ki_res.obj = (u64)(unsigned long)user_iocb; |
2003 | req->ki_res.data = iocb->aio_data; |
2004 | req->ki_res.res = 0; |
2005 | req->ki_res.res2 = 0; |
2006 | |
2007 | switch (iocb->aio_lio_opcode) { |
2008 | case IOCB_CMD_PREAD: |
2009 | return aio_read(req: &req->rw, iocb, vectored: false, compat); |
2010 | case IOCB_CMD_PWRITE: |
2011 | return aio_write(req: &req->rw, iocb, vectored: false, compat); |
2012 | case IOCB_CMD_PREADV: |
2013 | return aio_read(req: &req->rw, iocb, vectored: true, compat); |
2014 | case IOCB_CMD_PWRITEV: |
2015 | return aio_write(req: &req->rw, iocb, vectored: true, compat); |
2016 | case IOCB_CMD_FSYNC: |
2017 | return aio_fsync(req: &req->fsync, iocb, datasync: false); |
2018 | case IOCB_CMD_FDSYNC: |
2019 | return aio_fsync(req: &req->fsync, iocb, datasync: true); |
2020 | case IOCB_CMD_POLL: |
2021 | return aio_poll(aiocb: req, iocb); |
2022 | default: |
2023 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); |
2024 | return -EINVAL; |
2025 | } |
2026 | } |
2027 | |
2028 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
2029 | bool compat) |
2030 | { |
2031 | struct aio_kiocb *req; |
2032 | struct iocb iocb; |
2033 | int err; |
2034 | |
2035 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) |
2036 | return -EFAULT; |
2037 | |
2038 | /* enforce forwards compatibility on users */ |
2039 | if (unlikely(iocb.aio_reserved2)) { |
2040 | pr_debug("EINVAL: reserve field set\n"); |
2041 | return -EINVAL; |
2042 | } |
2043 | |
2044 | /* prevent overflows */ |
2045 | if (unlikely( |
2046 | (iocb.aio_buf != (unsigned long)iocb.aio_buf) || |
2047 | (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || |
2048 | ((ssize_t)iocb.aio_nbytes < 0) |
2049 | )) { |
2050 | pr_debug("EINVAL: overflow check\n"); |
2051 | return -EINVAL; |
2052 | } |
2053 | |
2054 | req = aio_get_req(ctx); |
2055 | if (unlikely(!req)) |
2056 | return -EAGAIN; |
2057 | |
2058 | err = __io_submit_one(ctx, iocb: &iocb, user_iocb, req, compat); |
2059 | |
2060 | /* Done with the synchronous reference */ |
2061 | iocb_put(iocb: req); |
2062 | |
2063 | /* |
2064 | * If err is 0, we'd either done aio_complete() ourselves or have |
2065 | * arranged for that to be done asynchronously. Anything non-zero |
2066 | * means that we need to destroy req ourselves. |
2067 | */ |
2068 | if (unlikely(err)) { |
2069 | iocb_destroy(iocb: req); |
2070 | put_reqs_available(ctx, nr: 1); |
2071 | } |
2072 | return err; |
2073 | } |
2074 | |
2075 | /* sys_io_submit: |
2076 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns |
2077 | * the number of iocbs queued. May return -EINVAL if the aio_context |
2078 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at |
2079 | * *iocbpp[0] is not properly initialized, if the operation specified |
2080 | * is invalid for the file descriptor in the iocb. May fail with |
2081 | * -EFAULT if any of the data structures point to invalid data. May |
2082 | * fail with -EBADF if the file descriptor specified in the first |
2083 | * iocb is invalid. May fail with -EAGAIN if insufficient resources |
2084 | * are available to queue any iocbs. Will return 0 if nr is 0. Will |
2085 | * fail with -ENOSYS if not implemented. |
2086 | */ |
2087 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
2088 | struct iocb __user * __user *, iocbpp) |
2089 | { |
2090 | struct kioctx *ctx; |
2091 | long ret = 0; |
2092 | int i = 0; |
2093 | struct blk_plug plug; |
2094 | |
2095 | if (unlikely(nr < 0)) |
2096 | return -EINVAL; |
2097 | |
2098 | ctx = lookup_ioctx(ctx_id); |
2099 | if (unlikely(!ctx)) { |
2100 | pr_debug("EINVAL: invalid context id\n"); |
2101 | return -EINVAL; |
2102 | } |
2103 | |
2104 | if (nr > ctx->nr_events) |
2105 | nr = ctx->nr_events; |
2106 | |
2107 | if (nr > AIO_PLUG_THRESHOLD) |
2108 | blk_start_plug(&plug); |
2109 | for (i = 0; i < nr; i++) { |
2110 | struct iocb __user *user_iocb; |
2111 | |
2112 | if (unlikely(get_user(user_iocb, iocbpp + i))) { |
2113 | ret = -EFAULT; |
2114 | break; |
2115 | } |
2116 | |
2117 | ret = io_submit_one(ctx, user_iocb, compat: false); |
2118 | if (ret) |
2119 | break; |
2120 | } |
2121 | if (nr > AIO_PLUG_THRESHOLD) |
2122 | blk_finish_plug(&plug); |
2123 | |
2124 | percpu_ref_put(ref: &ctx->users); |
2125 | return i ? i : ret; |
2126 | } |
2127 | |
2128 | #ifdef CONFIG_COMPAT |
2129 | COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, |
2130 | int, nr, compat_uptr_t __user *, iocbpp) |
2131 | { |
2132 | struct kioctx *ctx; |
2133 | long ret = 0; |
2134 | int i = 0; |
2135 | struct blk_plug plug; |
2136 | |
2137 | if (unlikely(nr < 0)) |
2138 | return -EINVAL; |
2139 | |
2140 | ctx = lookup_ioctx(ctx_id); |
2141 | if (unlikely(!ctx)) { |
2142 | pr_debug("EINVAL: invalid context id\n"); |
2143 | return -EINVAL; |
2144 | } |
2145 | |
2146 | if (nr > ctx->nr_events) |
2147 | nr = ctx->nr_events; |
2148 | |
2149 | if (nr > AIO_PLUG_THRESHOLD) |
2150 | blk_start_plug(&plug); |
2151 | for (i = 0; i < nr; i++) { |
2152 | compat_uptr_t user_iocb; |
2153 | |
2154 | if (unlikely(get_user(user_iocb, iocbpp + i))) { |
2155 | ret = -EFAULT; |
2156 | break; |
2157 | } |
2158 | |
2159 | ret = io_submit_one(ctx, user_iocb: compat_ptr(uptr: user_iocb), compat: true); |
2160 | if (ret) |
2161 | break; |
2162 | } |
2163 | if (nr > AIO_PLUG_THRESHOLD) |
2164 | blk_finish_plug(&plug); |
2165 | |
2166 | percpu_ref_put(ref: &ctx->users); |
2167 | return i ? i : ret; |
2168 | } |
2169 | #endif |
2170 | |
2171 | /* sys_io_cancel: |
2172 | * Attempts to cancel an iocb previously passed to io_submit. If |
2173 | * the operation is successfully cancelled, the resulting event is |
2174 | * copied into the memory pointed to by result without being placed |
2175 | * into the completion queue and 0 is returned. May fail with |
2176 | * -EFAULT if any of the data structures pointed to are invalid. |
2177 | * May fail with -EINVAL if aio_context specified by ctx_id is |
2178 | * invalid. May fail with -EAGAIN if the iocb specified was not |
2179 | * cancelled. Will fail with -ENOSYS if not implemented. |
2180 | */ |
2181 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
2182 | struct io_event __user *, result) |
2183 | { |
2184 | struct kioctx *ctx; |
2185 | struct aio_kiocb *kiocb; |
2186 | int ret = -EINVAL; |
2187 | u32 key; |
2188 | u64 obj = (u64)(unsigned long)iocb; |
2189 | |
2190 | if (unlikely(get_user(key, &iocb->aio_key))) |
2191 | return -EFAULT; |
2192 | if (unlikely(key != KIOCB_KEY)) |
2193 | return -EINVAL; |
2194 | |
2195 | ctx = lookup_ioctx(ctx_id); |
2196 | if (unlikely(!ctx)) |
2197 | return -EINVAL; |
2198 | |
2199 | spin_lock_irq(lock: &ctx->ctx_lock); |
2200 | /* TODO: use a hash or array, this sucks. */ |
2201 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
2202 | if (kiocb->ki_res.obj == obj) { |
2203 | ret = kiocb->ki_cancel(&kiocb->rw); |
2204 | list_del_init(entry: &kiocb->ki_list); |
2205 | break; |
2206 | } |
2207 | } |
2208 | spin_unlock_irq(lock: &ctx->ctx_lock); |
2209 | |
2210 | if (!ret) { |
2211 | /* |
2212 | * The result argument is no longer used - the io_event is |
2213 | * always delivered via the ring buffer. -EINPROGRESS indicates |
2214 | * cancellation is progress: |
2215 | */ |
2216 | ret = -EINPROGRESS; |
2217 | } |
2218 | |
2219 | percpu_ref_put(ref: &ctx->users); |
2220 | |
2221 | return ret; |
2222 | } |
2223 | |
2224 | static long do_io_getevents(aio_context_t ctx_id, |
2225 | long min_nr, |
2226 | long nr, |
2227 | struct io_event __user *events, |
2228 | struct timespec64 *ts) |
2229 | { |
2230 | ktime_t until = ts ? timespec64_to_ktime(ts: *ts) : KTIME_MAX; |
2231 | struct kioctx *ioctx = lookup_ioctx(ctx_id); |
2232 | long ret = -EINVAL; |
2233 | |
2234 | if (likely(ioctx)) { |
2235 | if (likely(min_nr <= nr && min_nr >= 0)) |
2236 | ret = read_events(ctx: ioctx, min_nr, nr, event: events, until); |
2237 | percpu_ref_put(ref: &ioctx->users); |
2238 | } |
2239 | |
2240 | return ret; |
2241 | } |
2242 | |
2243 | /* io_getevents: |
2244 | * Attempts to read at least min_nr events and up to nr events from |
2245 | * the completion queue for the aio_context specified by ctx_id. If |
2246 | * it succeeds, the number of read events is returned. May fail with |
2247 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is |
2248 | * out of range, if timeout is out of range. May fail with -EFAULT |
2249 | * if any of the memory specified is invalid. May return 0 or |
2250 | * < min_nr if the timeout specified by timeout has elapsed |
2251 | * before sufficient events are available, where timeout == NULL |
2252 | * specifies an infinite timeout. Note that the timeout pointed to by |
2253 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
2254 | */ |
2255 | #ifdef CONFIG_64BIT |
2256 | |
2257 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
2258 | long, min_nr, |
2259 | long, nr, |
2260 | struct io_event __user *, events, |
2261 | struct __kernel_timespec __user *, timeout) |
2262 | { |
2263 | struct timespec64 ts; |
2264 | int ret; |
2265 | |
2266 | if (timeout && unlikely(get_timespec64(&ts, timeout))) |
2267 | return -EFAULT; |
2268 | |
2269 | ret = do_io_getevents(ctx_id, min_nr, nr, events, ts: timeout ? &ts : NULL); |
2270 | if (!ret && signal_pending(current)) |
2271 | ret = -EINTR; |
2272 | return ret; |
2273 | } |
2274 | |
2275 | #endif |
2276 | |
2277 | struct __aio_sigset { |
2278 | const sigset_t __user *sigmask; |
2279 | size_t sigsetsize; |
2280 | }; |
2281 | |
2282 | SYSCALL_DEFINE6(io_pgetevents, |
2283 | aio_context_t, ctx_id, |
2284 | long, min_nr, |
2285 | long, nr, |
2286 | struct io_event __user *, events, |
2287 | struct __kernel_timespec __user *, timeout, |
2288 | const struct __aio_sigset __user *, usig) |
2289 | { |
2290 | struct __aio_sigset ksig = { NULL, }; |
2291 | struct timespec64 ts; |
2292 | bool interrupted; |
2293 | int ret; |
2294 | |
2295 | if (timeout && unlikely(get_timespec64(&ts, timeout))) |
2296 | return -EFAULT; |
2297 | |
2298 | if (usig && copy_from_user(to: &ksig, from: usig, n: sizeof(ksig))) |
2299 | return -EFAULT; |
2300 | |
2301 | ret = set_user_sigmask(umask: ksig.sigmask, sigsetsize: ksig.sigsetsize); |
2302 | if (ret) |
2303 | return ret; |
2304 | |
2305 | ret = do_io_getevents(ctx_id, min_nr, nr, events, ts: timeout ? &ts : NULL); |
2306 | |
2307 | interrupted = signal_pending(current); |
2308 | restore_saved_sigmask_unless(interrupted); |
2309 | if (interrupted && !ret) |
2310 | ret = -ERESTARTNOHAND; |
2311 | |
2312 | return ret; |
2313 | } |
2314 | |
2315 | #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) |
2316 | |
2317 | SYSCALL_DEFINE6(io_pgetevents_time32, |
2318 | aio_context_t, ctx_id, |
2319 | long, min_nr, |
2320 | long, nr, |
2321 | struct io_event __user *, events, |
2322 | struct old_timespec32 __user *, timeout, |
2323 | const struct __aio_sigset __user *, usig) |
2324 | { |
2325 | struct __aio_sigset ksig = { NULL, }; |
2326 | struct timespec64 ts; |
2327 | bool interrupted; |
2328 | int ret; |
2329 | |
2330 | if (timeout && unlikely(get_old_timespec32(&ts, timeout))) |
2331 | return -EFAULT; |
2332 | |
2333 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) |
2334 | return -EFAULT; |
2335 | |
2336 | |
2337 | ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize); |
2338 | if (ret) |
2339 | return ret; |
2340 | |
2341 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); |
2342 | |
2343 | interrupted = signal_pending(current); |
2344 | restore_saved_sigmask_unless(interrupted); |
2345 | if (interrupted && !ret) |
2346 | ret = -ERESTARTNOHAND; |
2347 | |
2348 | return ret; |
2349 | } |
2350 | |
2351 | #endif |
2352 | |
2353 | #if defined(CONFIG_COMPAT_32BIT_TIME) |
2354 | |
2355 | SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id, |
2356 | __s32, min_nr, |
2357 | __s32, nr, |
2358 | struct io_event __user *, events, |
2359 | struct old_timespec32 __user *, timeout) |
2360 | { |
2361 | struct timespec64 t; |
2362 | int ret; |
2363 | |
2364 | if (timeout && get_old_timespec32(&t, timeout)) |
2365 | return -EFAULT; |
2366 | |
2367 | ret = do_io_getevents(ctx_id, min_nr, nr, events, ts: timeout ? &t : NULL); |
2368 | if (!ret && signal_pending(current)) |
2369 | ret = -EINTR; |
2370 | return ret; |
2371 | } |
2372 | |
2373 | #endif |
2374 | |
2375 | #ifdef CONFIG_COMPAT |
2376 | |
2377 | struct __compat_aio_sigset { |
2378 | compat_uptr_t sigmask; |
2379 | compat_size_t sigsetsize; |
2380 | }; |
2381 | |
2382 | #if defined(CONFIG_COMPAT_32BIT_TIME) |
2383 | |
2384 | COMPAT_SYSCALL_DEFINE6(io_pgetevents, |
2385 | compat_aio_context_t, ctx_id, |
2386 | compat_long_t, min_nr, |
2387 | compat_long_t, nr, |
2388 | struct io_event __user *, events, |
2389 | struct old_timespec32 __user *, timeout, |
2390 | const struct __compat_aio_sigset __user *, usig) |
2391 | { |
2392 | struct __compat_aio_sigset ksig = { 0, }; |
2393 | struct timespec64 t; |
2394 | bool interrupted; |
2395 | int ret; |
2396 | |
2397 | if (timeout && get_old_timespec32(&t, timeout)) |
2398 | return -EFAULT; |
2399 | |
2400 | if (usig && copy_from_user(to: &ksig, from: usig, n: sizeof(ksig))) |
2401 | return -EFAULT; |
2402 | |
2403 | ret = set_compat_user_sigmask(umask: compat_ptr(uptr: ksig.sigmask), sigsetsize: ksig.sigsetsize); |
2404 | if (ret) |
2405 | return ret; |
2406 | |
2407 | ret = do_io_getevents(ctx_id, min_nr, nr, events, ts: timeout ? &t : NULL); |
2408 | |
2409 | interrupted = signal_pending(current); |
2410 | restore_saved_sigmask_unless(interrupted); |
2411 | if (interrupted && !ret) |
2412 | ret = -ERESTARTNOHAND; |
2413 | |
2414 | return ret; |
2415 | } |
2416 | |
2417 | #endif |
2418 | |
2419 | COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, |
2420 | compat_aio_context_t, ctx_id, |
2421 | compat_long_t, min_nr, |
2422 | compat_long_t, nr, |
2423 | struct io_event __user *, events, |
2424 | struct __kernel_timespec __user *, timeout, |
2425 | const struct __compat_aio_sigset __user *, usig) |
2426 | { |
2427 | struct __compat_aio_sigset ksig = { 0, }; |
2428 | struct timespec64 t; |
2429 | bool interrupted; |
2430 | int ret; |
2431 | |
2432 | if (timeout && get_timespec64(ts: &t, uts: timeout)) |
2433 | return -EFAULT; |
2434 | |
2435 | if (usig && copy_from_user(to: &ksig, from: usig, n: sizeof(ksig))) |
2436 | return -EFAULT; |
2437 | |
2438 | ret = set_compat_user_sigmask(umask: compat_ptr(uptr: ksig.sigmask), sigsetsize: ksig.sigsetsize); |
2439 | if (ret) |
2440 | return ret; |
2441 | |
2442 | ret = do_io_getevents(ctx_id, min_nr, nr, events, ts: timeout ? &t : NULL); |
2443 | |
2444 | interrupted = signal_pending(current); |
2445 | restore_saved_sigmask_unless(interrupted); |
2446 | if (interrupted && !ret) |
2447 | ret = -ERESTARTNOHAND; |
2448 | |
2449 | return ret; |
2450 | } |
2451 | #endif |
2452 |
Definitions
- aio_ring
- kioctx_table
- kioctx_cpu
- ctx_rq_wait
- kioctx
- fsync_iocb
- poll_iocb
- aio_kiocb
- aio_nr_lock
- aio_nr
- aio_max_nr
- aio_sysctls
- aio_sysctl_init
- kiocb_cachep
- kioctx_cachep
- aio_mnt
- aio_ring_fops
- aio_ctx_aops
- aio_private_file
- aio_init_fs_context
- aio_setup
- put_aio_ring_file
- aio_free_ring
- aio_ring_mremap
- aio_ring_vm_ops
- aio_ring_mmap
- aio_ring_fops
- aio_migrate_folio
- aio_ctx_aops
- aio_setup_ring
- kiocb_set_cancel_fn
- free_ioctx
- free_ioctx_reqs
- free_ioctx_users
- ioctx_add_table
- aio_nr_sub
- ioctx_alloc
- kill_ioctx
- exit_aio
- put_reqs_available
- __get_reqs_available
- refill_reqs_available
- user_refill_reqs_available
- get_reqs_available
- aio_get_req
- lookup_ioctx
- iocb_destroy
- aio_waiter
- aio_complete
- iocb_put
- aio_read_events_ring
- aio_read_events
- read_events
- aio_remove_iocb
- aio_complete_rw
- aio_prep_rw
- aio_setup_rw
- aio_rw_done
- aio_read
- aio_write
- aio_fsync_work
- aio_fsync
- aio_poll_put_work
- poll_iocb_lock_wq
- poll_iocb_unlock_wq
- aio_poll_complete_work
- aio_poll_cancel
- aio_poll_wake
- aio_poll_table
- aio_poll_queue_proc
- aio_poll
- __io_submit_one
- io_submit_one
- do_io_getevents
- __aio_sigset
Improve your Profiling and Debugging skills
Find out more