// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	void __user *msg_control;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow posting CQEs from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths but multipoll may decide to post extra
	 * cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

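/*
 * A short transfer is only worth retrying for stream and seqpacket sockets
 * with MSG_WAITALL set: there a partial result simply means more data is
 * pending, whereas retrying a datagram receive would return a different
 * message.
 */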
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

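/*
 * Return the async msghdr to the per-ring cache rather than freeing it.
 * Only possible when the ring lock is held, i.e. not for unlocked io-wq
 * issue.
 */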
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

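/*
 * Allocate async msghdr state, preferring the per-ring cache when the ring
 * is locked and falling back to a regular allocation otherwise.
 */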
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

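/*
 * Preserve the current msghdr state in async data so that the send/receive
 * can be retried. Returns -EAGAIN so the core retries the request, or
 * -ENOMEM if no async state could be allocated.
 */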
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

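/*
 * Copy the user msghdr, and the iovec it points to, into kernel memory for
 * IORING_OP_SENDMSG.
 */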
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				  &iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

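/*
 * Counterpart of io_setup_async_msg() for the addr-based send variants:
 * only the destination address needs preserving across a retry.
 */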
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

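/*
 * Verify that the io_uring_recvmsg_out header plus the requested name and
 * control lengths for multishot recvmsg fit in an int without overflow.
 */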
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

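/*
 * Copy the user msghdr for IORING_OP_RECVMSG into kernel memory. With
 * provided buffers (REQ_F_BUFFER_SELECT), at most one iovec is allowed
 * since the selected buffer is substituted at issue time.
 */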
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
					*ret, cflags | IORING_CQE_F_MORE)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

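/*
 * Multishot recvmsg lays the selected buffer out as a struct
 * io_uring_recvmsg_out header followed by the name, the control data and
 * finally the payload. Carve up the buffer accordingly, failing with
 * -EFAULT if it cannot even hold the header.
 */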
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

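/*
 * Receive a single message for multishot recvmsg and copy the header and
 * address into the start of the provided buffer; the control data has
 * already been received directly into its slot. The return value covers
 * the whole header area plus the copied payload.
 */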
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *						1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

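/*
 * Handler for IORING_OP_RECVMSG, covering both the single-shot and the
 * multishot (provided buffer) variants.
 */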
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

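/*
 * Handler for IORING_OP_RECV. In multishot mode a fresh provided buffer is
 * selected on every pass through the retry_multishot loop.
 */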
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

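/*
 * For plain user iovecs, drop the managed-frags optimisation and let the
 * generic zerocopy helper map the user pages into the skb.
 */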
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

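/*
 * Fill skb frags directly from a bvec iterator, as used for registered
 * buffers. The pages stay owned by io_uring, so the skb takes no page
 * references (SKBFL_MANAGED_FRAG_REFS).
 */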
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

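/*
 * Handler for IORING_OP_SEND_ZC. The send completes with a CQE flagged
 * IORING_CQE_F_MORE; a separate IORING_CQE_F_NOTIF CQE follows once the
 * network stack no longer references the user buffer.
 */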
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

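/*
 * Failure hook for the send/recv opcodes: report partial progress instead
 * of the error, and keep IORING_CQE_F_MORE set for zerocopy sends that
 * still owe a notification CQE.
 */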
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

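/*
 * Handler for IORING_OP_ACCEPT. In multishot mode every accepted
 * connection posts a CQE flagged IORING_CQE_F_MORE and the handler loops
 * to accept again, until no connection is pending or posting a CQE fails.
 */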
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

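/*
 * Handler for IORING_OP_CONNECT. A nonblocking connect returning
 * -EINPROGRESS is finished via poll: the retry reads the socket error
 * state instead of calling connect() again.
 */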
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif