#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};
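
/*
 * Illustrative sketch, not part of this header: file_ptr is a tagged
 * pointer, so the file pointer has to be separated from the low FFS_*
 * flag bits before use. FFS_MASK below is named for illustration only;
 * the real flag definitions live in the io_uring core.
 *
 *	struct file *file = (struct file *)(f->file_ptr & FFS_MASK);
 *	unsigned long ffs_flags = f->file_ptr & ~FFS_MASK;
 */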

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t lock;
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};
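
/*
 * Illustrative sketch, not part of this header: hash_bits sizes the
 * table as a power of two, so a bucket lookup reduces the computed
 * hash to one of 1 << hash_bits entries. A plausible lookup, assuming
 * `hash` has already been computed:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash & ((1U << table->hash_bits) - 1)];
 */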

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int cached_refs;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	struct { /* task_work */
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 * After the application has read a new SQ head value, this
	 * counter includes all submissions that were dropped up to
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
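
/*
 * Illustrative sketch, not part of this header: per the comments above,
 * the application owns the CQ head and masks it to index cqes[]. A
 * minimal userspace reap loop, assuming `ring` points at the mmap'ed
 * struct io_rings and the caller issues the appropriate acquire/release
 * barriers around the head/tail accesses:
 *
 *	u32 head = ring->cq.head;
 *	while (head != ring->cq.tail) {
 *		struct io_uring_cqe *cqe = &ring->cqes[head & ring->cq_ring_mask];
 *		handle_cqe(cqe);	// hypothetical consumer
 *		head++;
 *	}
 *	ring->cq.head = head;
 */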

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};
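
/*
 * Illustrative sketch, not part of this header: when restrictions are
 * registered, an SQE is rejected unless its flags are a subset of
 * sqe_flags_allowed and include all of sqe_flags_required. A plausible
 * check, assuming `sqe_flags` holds the flags byte of the SQE:
 *
 *	if (sqe_flags & ~res->sqe_flags_allowed)
 *		return -EACCES;
 *	if ((sqe_flags & res->sqe_flags_required) != res->sqe_flags_required)
 *		return -EACCES;
 */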

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	unsigned short submit_nr;
	unsigned int cqes_count;
	struct blk_plug plug;
};

struct io_ev_fd {
	struct eventfd_ctx *cq_ev_fd;
	unsigned int eventfd_async: 1;
	struct rcu_head rcu;
	atomic_t refs;
	atomic_t ops;
};

struct io_alloc_cache {
	struct io_wq_work_node list;
	unsigned int nr_cached;
	unsigned int max_cached;
	size_t elem_size;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		struct percpu_ref refs;

		enum task_work_notify_mode notify_method;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;
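
		/*
		 * Illustrative sketch, not part of this header: with the
		 * indirection described above, userspace publishes a
		 * submission by writing an SQE index into the mapped
		 * sq_array at the masked tail and then bumping the tail
		 * (with a release barrier), e.g.:
		 *
		 *	u32 tail = ring->sq.tail;
		 *	sq_array[tail & ring->sq_ring_mask] = sqe_index;
		 *	ring->sq.tail = tail + 1;
		 */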

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		atomic_t cancel_seq;
		struct io_file_table file_table;
		unsigned nr_user_files;
		unsigned nr_user_bufs;
		struct io_mapped_ubuf **user_bufs;

		struct io_submit_state submit_state;

		struct io_buffer_list *io_bl;
		struct xarray io_bl_xa;

		struct io_hash_table cancel_table_locked;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list iopoll_list;
		bool poll_multi_queue;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted,
		 * allocation goes through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;
		unsigned cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce b/w CPUs.
	 */
	struct {
		struct llist_head work_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	struct io_uring_cqe completion_cqes[16];

	spinlock_t completion_lock;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list locked_free_list;
	unsigned int locked_free_nr;

	struct list_head io_buffers_comp;
	struct list_head cq_overflow_list;
	struct io_hash_table cancel_table;

	struct hlist_head waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head futex_list;
	struct io_alloc_cache futex_cache;
#endif

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct xarray personalities;
	u32 pers_next;

	struct list_head io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_mapped_ubuf *dummy_ubuf;
	struct io_rsrc_data *file_data;
	struct io_rsrc_data *buf_data;

	/* protected by ->uring_lock */
	struct list_head rsrc_ref_list;
	struct io_alloc_cache rsrc_node_cache;
	struct wait_queue_head rsrc_quiesce_wq;
	unsigned rsrc_quiesce;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/* ctx exit and cancelation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct list_head tctx_list;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];
	bool iowq_limits_set;

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;
	unsigned sq_thread_idle;
	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short n_ring_pages;
	unsigned short n_sqe_pages;
	struct page **ring_pages;
	struct page **sqe_pages;
};

struct io_tw_state {
	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
	bool locked;
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};
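
/*
 * Illustrative sketch, not part of this header: __REQ_F_LAST_BIT exists
 * only so the core can assert at build time that all flags still fit in
 * the io_kiocb flags word, e.g. with something like:
 *
 *	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));
 */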

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};
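
/*
 * Illustrative sketch, not part of this header: a request type defines
 * its private struct with the file pointer first and overlays it on
 * io_cmd_data, staying within its size. A hypothetical example:
 *
 *	struct io_example_cmd {
 *		struct file *file;
 *		u64 user_value;
 *	};
 *
 * io_kiocb_to_cmd() below checks the size constraint at build time.
 */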

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)), \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
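
/*
 * Illustrative usage, not part of this header, continuing the
 * hypothetical io_example_cmd above:
 *
 *	struct io_example_cmd *ec = io_kiocb_to_cmd(req, struct io_example_cmd);
 *	struct io_kiocb *orig = cmd_to_io_kiocb(ec);
 */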

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16 buf_index;
	unsigned int flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct task_struct *task;

	struct io_rsrc_node *rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf *imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list *buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};
	atomic_t refs;
	atomic_t poll_refs;
	struct io_task_work io_task_work;
	unsigned nr_tw;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	struct {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};
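
/*
 * Illustrative sketch, not part of this header: because every member of
 * the leading union starts with the file pointer (see the NOTE above),
 * the following accesses refer to the same field:
 *
 *	struct file *f1 = req->file;
 *	struct file *f2 = req->cmd.file;
 */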

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif