// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

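/*
 * Parking temporarily halts the SQPOLL thread so another task can safely
 * poke at sqd state (e.g. add or remove an entry on ->ctx_list) while
 * holding ->lock. ->park_pending counts concurrent parkers, so the
 * SHOULD_PARK bit is only left cleared once the last of them unparks.
 */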
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

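/*
 * The idle timeout is shared by every ring attached to this sqd; use the
 * largest per-ring value so the ring asking for the longest busy-poll
 * period wins. Callers hold sqd->lock (the thread is parked).
 */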
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

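/*
 * Detach a ring from its SQPOLL backend: pull it off ->ctx_list under
 * park, recompute the shared idle timeout, and drop our sqd reference.
 * Dropping the final reference (io_put_sq_data()) stops the thread.
 */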
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

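/*
 * IORING_SETUP_ATTACH_WQ: share the SQPOLL backend of an existing ring.
 * p->wq_fd must refer to an io_uring instance that already has an sqd,
 * and only tasks in the same thread group as its owner may attach.
 */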
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

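/*
 * Find or create the sqd for a new ring. An attach attempt that fails
 * with -EPERM (foreign thread group) falls back to setting up a fresh
 * sqd; any other attach error is returned to the caller.
 */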
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

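/*
 * One submission pass for a single ring: reap IOPOLL completions if any
 * are outstanding, then submit pending SQEs under the ring's saved
 * credentials. Returns the number of entries submitted, so the caller can
 * tell whether this ring kept the thread busy.
 */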
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

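/*
 * A park or stop request (or a signal) is pending: drop sqd->lock so the
 * requesting task can make progress, consume any pending signal, then
 * retake the lock. Returns true if the thread should exit its loop.
 */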
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

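/*
 * The SQPOLL thread itself. It runs a submission pass over every attached
 * ring and only sleeps once sq_thread_idle elapses with no work found.
 * Before sleeping it sets IORING_SQ_NEED_WAKEUP in each ring's flags;
 * userspace is expected to check that flag and kick the thread, roughly
 * (see io_uring_enter(2)):
 *
 *	if (*sq_ring_flags & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, to_submit, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */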
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	else
		set_cpus_allowed_ptr(current, cpu_online_mask);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
			}
			continue;
		}

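		/*
		 * No work and the idle period has elapsed: advertise
		 * IORING_SQ_NEED_WAKEUP, then re-check the SQ tail (with a
		 * barrier in between) so that a submitter racing with us
		 * either sees the flag or we see its new entries.
		 */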
		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

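	/*
	 * Tear down: cancel the remaining requests owned by this thread, mark
	 * it gone, and leave NEED_WAKEUP set so submitters stop relying on a
	 * thread that no longer exists. ->exited pairs with the
	 * wait_for_completion() in io_sq_thread_stop().
	 */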
	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

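/*
 * Wait for the SQPOLL thread to free up SQ ring space; this backs the
 * IORING_ENTER_SQ_WAIT flag of io_uring_enter(). Returns once the ring is
 * no longer full or the caller has a signal pending.
 */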
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}

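/*
 * Set up SQPOLL for a new ring: attach to (or create) the shared sqd, add
 * the ring to its ctx list and, for a freshly created sqd, apply the
 * requested CPU affinity and spawn the "iou-sqp-<pid>" kernel thread.
 */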
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

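/*
 * Apply an io-wq CPU affinity update to the worker pool owned by the
 * SQPOLL task. The thread is parked so it cannot exit (and clear
 * sqd->thread) while we dereference its io_uring task context.
 */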
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}