#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE = 1,
};

enum {
	IOU_OK = 0,
	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT = -ECANCELED,
};
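
/*
 * Illustrative completion pattern for an issue handler using the codes
 * above (a sketch, not taken from any specific opcode):
 *
 *	io_req_set_res(req, ret, 0);
 *	return IOU_OK;
 *
 * Returning IOU_ISSUE_SKIP_COMPLETE instead means the CQE will be posted
 * elsewhere, e.g. once a poll retry or async completion finishes.
 */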

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

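/*
 * Sanity check that the current context is allowed to post CQEs for this
 * ring, according to how CQ posting is serialized for its setup flags.
 */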
#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

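/* Queue task_work for @req with no special wakeup hints. */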
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

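/*
 * Return the next free CQE slot in *@ret, refilling the cached CQE range
 * via io_cqe_cache_refill() when it has been exhausted.
 */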
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

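/*
 * Mark the request as failed. A failed request always posts a CQE, so if
 * CQE skipping was requested, it is transferred to the trailing link CQEs
 * instead.
 */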
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

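/* Drop the file reference unless it came from the fixed file table. */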
static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

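/*
 * Locking helpers for handlers that may run either inline from the
 * submission path (with ->uring_lock already held) or from an io-wq worker
 * (unlocked). An illustrative sketch of their use in an issue handler:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... touch state protected by ->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */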
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

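/*
 * True if the SQ ring currently holds the maximum number of entries the
 * kernel hasn't consumed yet.
 */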
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

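/* Make sure ->uring_lock is held for the rest of this task_work run. */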
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

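/*
 * Charge @nr request references against the per-task cached counter,
 * refilling the cache when it runs dry.
 */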
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

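/*
 * Pop a request off the ctx free list. Callers must have checked that the
 * cache isn't empty, see io_alloc_req() below.
 */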
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

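/*
 * With IORING_SETUP_DEFER_TASKRUN, only the ring's submitter task may run
 * its task_work; otherwise any task may.
 */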
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

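/* Set the request's result and queue it for completion via task_work. */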
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif