1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * linux/ipc/msg.c |
4 | * Copyright (C) 1992 Krishna Balasubramanian |
5 | * |
6 | * Removed all the remaining kerneld mess |
7 | * Catch the -EFAULT stuff properly |
8 | * Use GFP_KERNEL for messages as in 1.2 |
9 | * Fixed up the unchecked user space derefs |
10 | * Copyright (C) 1998 Alan Cox & Andi Kleen |
11 | * |
12 | * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> |
13 | * |
14 | * mostly rewritten, threaded and wake-one semantics added |
15 | * MSGMAX limit removed, sysctl's added |
16 | * (c) 1999 Manfred Spraul <manfred@colorfullife.com> |
17 | * |
18 | * support for audit of ipc object properties and permission changes |
19 | * Dustin Kirkland <dustin.kirkland@us.ibm.com> |
20 | * |
21 | * namespaces support |
22 | * OpenVZ, SWsoft Inc. |
23 | * Pavel Emelianov <xemul@openvz.org> |
24 | */ |
25 | |
26 | #include <linux/capability.h> |
27 | #include <linux/msg.h> |
28 | #include <linux/spinlock.h> |
29 | #include <linux/init.h> |
30 | #include <linux/mm.h> |
31 | #include <linux/proc_fs.h> |
32 | #include <linux/list.h> |
33 | #include <linux/security.h> |
34 | #include <linux/sched/wake_q.h> |
35 | #include <linux/syscalls.h> |
36 | #include <linux/audit.h> |
37 | #include <linux/seq_file.h> |
38 | #include <linux/rwsem.h> |
39 | #include <linux/nsproxy.h> |
40 | #include <linux/ipc_namespace.h> |
41 | #include <linux/rhashtable.h> |
42 | #include <linux/percpu_counter.h> |
43 | |
44 | #include <asm/current.h> |
45 | #include <linux/uaccess.h> |
46 | #include "util.h" |
47 | |
48 | /* one msq_queue structure for each present queue on the system */ |
/*
 * One msg_queue structure for each present queue on the system.
 *
 * All fields after q_perm are protected by q_perm.lock (taken via
 * ipc_lock_object()); q_perm itself is managed by the generic ipc code.
 */
struct msg_queue {
	struct kern_ipc_perm q_perm;	/* generic ipc permission/id state; used with container_of() */
	time64_t q_stime;		/* last msgsnd time */
	time64_t q_rtime;		/* last msgrcv time */
	time64_t q_ctime;		/* last change time */
	unsigned long q_cbytes;		/* current number of bytes on queue */
	unsigned long q_qnum;		/* number of messages in queue */
	unsigned long q_qbytes;		/* max number of bytes on queue */
	struct pid *q_lspid;		/* pid of last msgsnd */
	struct pid *q_lrpid;		/* last receive pid */

	struct list_head q_messages;	/* queued struct msg_msg, FIFO order */
	struct list_head q_receivers;	/* sleeping receivers (struct msg_receiver) */
	struct list_head q_senders;	/* sleeping senders (struct msg_sender) */
} __randomize_layout;
64 | |
65 | /* |
66 | * MSG_BARRIER Locking: |
67 | * |
68 | * Similar to the optimization used in ipc/mqueue.c, one syscall return path |
69 | * does not acquire any locks when it sees that a message exists in |
70 | * msg_receiver.r_msg. Therefore r_msg is set using smp_store_release() |
71 | * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition, |
72 | * wake_q_add_safe() is used. See ipc/mqueue.c for more details |
73 | */ |
74 | |
75 | /* one msg_receiver structure for each sleeping receiver */ |
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
	struct list_head r_list;	/* link in msg_queue.q_receivers */
	struct task_struct *r_tsk;	/* the sleeping task to wake */

	int r_mode;			/* SEARCH_* mode derived from msgtyp/msgflg */
	long r_msgtype;			/* requested message type */
	long r_maxsize;			/* receiver's buffer capacity */

	/*
	 * Result slot: the sender stores either a message pointer or an
	 * ERR_PTR() here with smp_store_release(); see MSG_BARRIER above.
	 */
	struct msg_msg *r_msg;
};
86 | |
87 | /* one msg_sender for each sleeping sender */ |
/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head list;		/* link in msg_queue.q_senders */
	struct task_struct *tsk;	/* the sleeping task to wake */
	size_t msgsz;			/* size of the message it wants to send */
};
93 | |
/*
 * Receive-search modes, derived from msgrcv()'s msgtyp/msgflg by
 * convert_mode() and interpreted by testmsg().
 */
#define SEARCH_ANY 1		/* msgtyp == 0: take the first message */
#define SEARCH_EQUAL 2		/* msgtyp > 0: exact type match */
#define SEARCH_NOTEQUAL 3	/* msgtyp > 0 with MSG_EXCEPT: any other type */
#define SEARCH_LESSEQUAL 4	/* msgtyp < 0: type <= |msgtyp| */
#define SEARCH_NUMBER 5		/* MSG_COPY: msgtyp is a position, match anything */

/* shorthand for this ipc namespace's message-queue id table */
#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
101 | |
/*
 * Look up a queue by ipc id under RCU, without validating the sequence
 * number. Caller must hold rcu_read_lock(); returns ERR_PTR() on failure.
 */
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(ids: &msg_ids(ns), id);

	if (IS_ERR(ptr: ipcp))
		return ERR_CAST(ptr: ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
111 | |
/*
 * As msq_obtain_object(), but also checks that the id's sequence number
 * matches (i.e. the id has not been reused). Caller holds rcu_read_lock().
 */
static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(ids: &msg_ids(ns), id);

	if (IS_ERR(ptr: ipcp))
		return ERR_CAST(ptr: ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
122 | |
/* Remove the queue's id from the namespace's id table. */
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
127 | |
/*
 * RCU callback that releases the security blob and frees the queue
 * itself, once no RCU reader can still hold a reference.
 */
static void msg_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);

	security_msg_queue_free(msq: &msq->q_perm);
	kfree(objp: msq);
}
136 | |
137 | /** |
138 | * newque - Create a new msg queue |
139 | * @ns: namespace |
140 | * @params: ptr to the structure that contains the key and msgflg |
141 | * |
142 | * Called with msg_ids.rwsem held (writer) |
143 | */ |
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 *
 * Returns the new queue's ipc id on success, or a negative errno.
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = kmalloc(size: sizeof(*msq), GFP_KERNEL_ACCOUNT);
	if (unlikely(!msq))
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;	/* only the permission bits of msgflg */
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq: &msq->q_perm);
	if (retval) {
		/* security blob was not allocated: plain kfree, not msg_rcu_free */
		kfree(objp: msq);
		return retval;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = ktime_get_real_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;		/* default per-queue byte limit */
	msq->q_lspid = msq->q_lrpid = NULL;
	INIT_LIST_HEAD(list: &msq->q_messages);
	INIT_LIST_HEAD(list: &msq->q_receivers);
	INIT_LIST_HEAD(list: &msq->q_senders);

	/* ipc_addid() locks msq upon success. */
	retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (retval < 0) {
		/* drops the reference and frees via RCU (security blob exists now) */
		ipc_rcu_putref(ptr: &msq->q_perm, func: msg_rcu_free);
		return retval;
	}

	ipc_unlock_object(perm: &msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}
186 | |
187 | static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz) |
188 | { |
189 | return msgsz + msq->q_cbytes <= msq->q_qbytes && |
190 | 1 + msq->q_qnum <= msq->q_qbytes; |
191 | } |
192 | |
/*
 * Queue the current task on @msq's sender wait list and mark it
 * TASK_INTERRUPTIBLE; the caller subsequently drops the lock and
 * calls schedule(). Caller holds the queue lock.
 */
static inline void ss_add(struct msg_queue *msq,
			  struct msg_sender *mss, size_t msgsz)
{
	mss->tsk = current;
	mss->msgsz = msgsz;
	/*
	 * No memory barrier required: we did ipc_lock_object(),
	 * and the waker obtains that lock before calling wake_q_add().
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	list_add_tail(new: &mss->list, head: &msq->q_senders);
}
205 | |
/*
 * Remove a sender from the wait list, unless ss_wakeup(kill=true)
 * already detached it (it signals that by setting list.next to NULL).
 */
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next)
		list_del(entry: &mss->list);
}
211 | |
/*
 * Wake sleeping senders. With @kill (queue removal) every sender is
 * woken unconditionally and detached by NULLing list.next (see
 * ss_del()). Otherwise only senders whose message now fits are woken;
 * the rest are rotated to the tail, and iteration stops once the first
 * skipped sender comes around again. Caller holds the queue lock;
 * wakeups are deferred to @wake_q.
 */
static void ss_wakeup(struct msg_queue *msq,
		      struct wake_q_head *wake_q, bool kill)
{
	struct msg_sender *mss, *t;
	struct task_struct *stop_tsk = NULL;
	struct list_head *h = &msq->q_senders;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;

		/*
		 * Stop at the first task we don't wakeup,
		 * we've already iterated the original
		 * sender queue.
		 */
		else if (stop_tsk == mss->tsk)
			break;
		/*
		 * We are not in an EIDRM scenario here, therefore
		 * verify that we really need to wakeup the task.
		 * To maintain current semantics and wakeup order,
		 * move the sender to the tail on behalf of the
		 * blocked task.
		 */
		else if (!msg_fits_inqueue(msq, msgsz: mss->msgsz)) {
			if (!stop_tsk)
				stop_tsk = mss->tsk;

			list_move_tail(list: &mss->list, head: &msq->q_senders);
			continue;
		}

		wake_q_add(head: wake_q, task: mss->tsk);
	}
}
248 | |
/*
 * Wake every sleeping receiver with error @res (e.g. -EIDRM on queue
 * removal, -EAGAIN after IPC_SET). Caller holds the queue lock;
 * wakeups are deferred to @wake_q.
 */
static void expunge_all(struct msg_queue *msq, int res,
			struct wake_q_head *wake_q)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		struct task_struct *r_tsk;

		/* take a task ref so wake_q_add_safe() may consume it */
		r_tsk = get_task_struct(t: msr->r_tsk);

		/* see MSG_BARRIER for purpose/pairing */
		smp_store_release(&msr->r_msg, ERR_PTR(res));
		wake_q_add_safe(head: wake_q, task: r_tsk);
	}
}
264 | |
265 | /* |
266 | * freeque() wakes up waiters on the sender and receiver waiting queue, |
267 | * removes the message queue from message queue ID IDR, and cleans up all the |
268 | * messages associated with this queue. |
269 | * |
270 | * msg_ids.rwsem (writer) and the spinlock for this message queue are held |
271 | * before freeque() is called. msg_ids.rwsem remains locked on exit. |
272 | */ |
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
	__releases(RCU)
	__releases(&msq->q_perm)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
	DEFINE_WAKE_Q(wake_q);

	expunge_all(msq, res: -EIDRM, wake_q: &wake_q);
	ss_wakeup(msq, wake_q: &wake_q, kill: true);
	msg_rmid(ns, s: msq);		/* no new lookups can find the queue now */
	ipc_unlock_object(perm: &msq->q_perm);
	wake_up_q(head: &wake_q);
	rcu_read_unlock();

	/*
	 * The message list is walked unlocked: the queue is gone from the
	 * IDR and msg_ids.rwsem (writer) is still held by the caller.
	 */
	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		percpu_counter_sub_local(fbc: &ns->percpu_msg_hdrs, amount: 1);
		free_msg(msg);
	}
	percpu_counter_sub_local(fbc: &ns->percpu_msg_bytes, amount: msq->q_cbytes);
	ipc_update_pid(pos: &msq->q_lspid, NULL);	/* drop pid references */
	ipc_update_pid(pos: &msq->q_lrpid, NULL);
	ipc_rcu_putref(ptr: &msq->q_perm, func: msg_rcu_free);
}
297 | |
/*
 * msgget() implementation: find an existing queue by @key or create a
 * new one, delegating lookup/create policy to the generic ipcget().
 */
long ksys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops msg_ops = {
		.getnew = newque,
		.associate = security_msg_queue_associate,
	};
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, ids: &msg_ids(ns), ops: &msg_ops, params: &msg_params);
}
314 | |
/* msgget(2) entry point; thin wrapper around ksys_msgget(). */
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	return ksys_msgget(key, msgflg);
}
319 | |
/*
 * Copy queue status to userspace in either the modern (IPC_64) or the
 * legacy (IPC_OLD) layout. For IPC_OLD, wide counters are clamped to
 * USHRT_MAX in the short fields while the full values go into the
 * msg_l*bytes fields. Returns number of bytes not copied (0 on success)
 * or -EINVAL for an unknown version.
 */
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(to: buf, from: in, n: sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		/* zero first: padding must not leak kernel stack data */
		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(in: &in->msg_perm, out: &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;	/* unclamped copy */

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;	/* unclamped copy */

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(to: buf, from: &out, n: sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
364 | |
/*
 * Read an IPC_SET request from userspace in either layout into a
 * struct msqid64_ds. Only the fields IPC_SET consumes are filled for
 * the legacy layout. Returns 0, -EFAULT, or -EINVAL.
 */
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(to: out, from: buf, n: sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(to: &tbuf_old, from: buf, n: sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		/* the short msg_qbytes saturates; fall back to the long field */
		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
395 | |
396 | /* |
397 | * This function handles some msgctl commands which require the rwsem |
398 | * to be held in write mode. |
399 | * NOTE: no locks must be held, the rwsem is taken inside this function. |
400 | */ |
/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 *
 * Handles IPC_RMID (queue removal) and IPC_SET (permission/limit
 * update); @perm and @msg_qbytes are only meaningful for IPC_SET.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
			struct ipc64_perm *perm, int msg_qbytes)
{
	struct kern_ipc_perm *ipcp;
	struct msg_queue *msq;
	int err;

	down_write(sem: &msg_ids(ns).rwsem);
	rcu_read_lock();

	/* checks id validity and ownership/capability for the command */
	ipcp = ipcctl_obtain_check(ns, ids: &msg_ids(ns), id: msqid, cmd,
				   perm, extra_perm: msg_qbytes);
	if (IS_ERR(ptr: ipcp)) {
		err = PTR_ERR(ptr: ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq: &msq->q_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(perm: &msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
	{
		DEFINE_WAKE_Q(wake_q);

		/* raising qbytes above the namespace limit needs a capability */
		if (msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(perm: &msq->q_perm);
		err = ipc_update_perm(in: perm, out: ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msg_qbytes;

		msq->q_ctime = ktime_get_real_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, res: -EAGAIN, wake_q: &wake_q);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(msq, wake_q: &wake_q, kill: false);
		ipc_unlock_object(perm: &msq->q_perm);
		wake_up_q(head: &wake_q);

		goto out_unlock1;
	}
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(perm: &msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(sem: &msg_ids(ns).rwsem);
	return err;
}
476 | |
/*
 * Handle IPC_INFO (static namespace limits) and MSG_INFO (current
 * resource usage). Returns the highest in-use queue index (>= 0) on
 * success so the caller can return it to userspace, or a negative
 * errno.
 */
static int msgctl_info(struct ipc_namespace *ns, int msqid,
			 int cmd, struct msginfo *msginfo)
{
	int err;
	int max_idx;

	/*
	 * We must not return kernel stack data.
	 * due to padding, it's not enough
	 * to set all member fields.
	 */
	err = security_msg_queue_msgctl(NULL, cmd);
	if (err)
		return err;

	memset(msginfo, 0, sizeof(*msginfo));
	msginfo->msgmni = ns->msg_ctlmni;
	msginfo->msgmax = ns->msg_ctlmax;
	msginfo->msgmnb = ns->msg_ctlmnb;
	msginfo->msgssz = MSGSSZ;
	msginfo->msgseg = MSGSEG;
	down_read(sem: &msg_ids(ns).rwsem);
	if (cmd == MSG_INFO)
		msginfo->msgpool = msg_ids(ns).in_use;
	max_idx = ipc_get_maxidx(ids: &msg_ids(ns));
	up_read(sem: &msg_ids(ns).rwsem);
	if (cmd == MSG_INFO) {
		/* live counters, clamped to the int-sized msginfo fields */
		msginfo->msgmap = min_t(int,
				       percpu_counter_sum(&ns->percpu_msg_hdrs),
				       INT_MAX);
		msginfo->msgtql = min_t(int,
				       percpu_counter_sum(&ns->percpu_msg_bytes),
				       INT_MAX);
	} else {
		/* IPC_INFO reports the historic compile-time constants */
		msginfo->msgmap = MSGMAP;
		msginfo->msgpool = MSGPOOL;
		msginfo->msgtql = MSGTQL;
	}
	return (max_idx < 0) ? 0 : max_idx;
}
517 | |
/*
 * Handle IPC_STAT, MSG_STAT and MSG_STAT_ANY: snapshot a queue's status
 * into @p. For MSG_STAT/MSG_STAT_ANY @msqid is an index, for IPC_STAT
 * it is a full ipc id. MSG_STAT_ANY skips the read-permission check.
 * Returns 0 (IPC_STAT) or the full ipc id (MSG_STAT*) on success,
 * negative errno on failure.
 */
static int msgctl_stat(struct ipc_namespace *ns, int msqid,
			 int cmd, struct msqid64_ds *p)
{
	struct msg_queue *msq;
	int err;

	/* zero padding so no kernel stack data reaches userspace */
	memset(p, 0, sizeof(*p));

	rcu_read_lock();
	if (cmd == MSG_STAT || cmd == MSG_STAT_ANY) {
		/* index-based lookup, no sequence-number check */
		msq = msq_obtain_object(ns, id: msqid);
		if (IS_ERR(ptr: msq)) {
			err = PTR_ERR(ptr: msq);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		msq = msq_obtain_object_check(ns, id: msqid);
		if (IS_ERR(ptr: msq)) {
			err = PTR_ERR(ptr: msq);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == MSG_STAT_ANY)
		audit_ipc_obj(ipcp: &msq->q_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, ipcp: &msq->q_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_msg_queue_msgctl(msq: &msq->q_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(perm: &msq->q_perm);

	/* raced with RMID between lookup and lock? */
	if (!ipc_valid_object(perm: &msq->q_perm)) {
		ipc_unlock_object(perm: &msq->q_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(in: &msq->q_perm, out: &p->msg_perm);
	p->msg_stime  = msq->q_stime;
	p->msg_rtime  = msq->q_rtime;
	p->msg_ctime  = msq->q_ctime;
#ifndef CONFIG_64BIT
	/* 32-bit ABI splits the 64-bit times across two fields */
	p->msg_stime_high = msq->q_stime >> 32;
	p->msg_rtime_high = msq->q_rtime >> 32;
	p->msg_ctime_high = msq->q_ctime >> 32;
#endif
	p->msg_cbytes = msq->q_cbytes;
	p->msg_qnum   = msq->q_qnum;
	p->msg_qbytes = msq->q_qbytes;
	p->msg_lspid  = pid_vnr(pid: msq->q_lspid);
	p->msg_lrpid  = pid_vnr(pid: msq->q_lrpid);

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * MSG_STAT and MSG_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = msq->q_perm.id;
	}

	ipc_unlock_object(perm: &msq->q_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
596 | |
/*
 * Common msgctl() implementation: dispatch on @cmd and marshal results
 * back to userspace in the requested @version layout.
 */
static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int version)
{
	struct ipc_namespace *ns;
	struct msqid64_ds msqid64;
	int err;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO: {
		struct msginfo msginfo;
		err = msgctl_info(ns, msqid, cmd, msginfo: &msginfo);
		if (err < 0)
			return err;
		/* err >= 0 is the max index; still returned after the copy */
		if (copy_to_user(to: buf, from: &msginfo, n: sizeof(struct msginfo)))
			err = -EFAULT;
		return err;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case MSG_STAT_ANY:
	case IPC_STAT:
		err = msgctl_stat(ns, msqid, cmd, p: &msqid64);
		if (err < 0)
			return err;
		if (copy_msqid_to_user(buf, in: &msqid64, version))
			err = -EFAULT;
		return err;
	case IPC_SET:
		if (copy_msqid_from_user(out: &msqid64, buf, version))
			return -EFAULT;
		return msgctl_down(ns, msqid, cmd, perm: &msqid64.msg_perm,
				   msg_qbytes: msqid64.msg_qbytes);
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, NULL, msg_qbytes: 0);
	default:
		return -EINVAL;
	}
}
639 | |
/* msgctl(2) entry point; modern ABI always uses the IPC_64 layout. */
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	return ksys_msgctl(msqid, cmd, buf, IPC_64);
}
644 | |
645 | #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION |
/*
 * Legacy msgctl() path: the version (IPC_64 vs IPC_OLD) is encoded in
 * @cmd and stripped out by ipc_parse_version().
 */
long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	int version = ipc_parse_version(&cmd);

	return ksys_msgctl(msqid, cmd, buf, version);
}
652 | |
/* old_msgctl(2) entry point for architectures with the legacy ABI. */
SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	return ksys_old_msgctl(msqid, cmd, buf);
}
657 | #endif |
658 | |
659 | #ifdef CONFIG_COMPAT |
660 | |
/* 32-bit (IPC_OLD) msqid_ds layout as seen by compat userspace. */
struct compat_msqid_ds {
	struct compat_ipc_perm msg_perm;
	compat_uptr_t msg_first;	/* historical kernel pointers; unused */
	compat_uptr_t msg_last;
	old_time32_t msg_stime;		/* 32-bit times: truncated on copy-out */
	old_time32_t msg_rtime;
	old_time32_t msg_ctime;
	compat_ulong_t msg_lcbytes;	/* "long" variants of the short fields below */
	compat_ulong_t msg_lqbytes;
	unsigned short msg_cbytes;
	unsigned short msg_qnum;
	unsigned short msg_qbytes;
	compat_ipc_pid_t msg_lspid;
	compat_ipc_pid_t msg_lrpid;
};
676 | |
/*
 * Read an IPC_SET request from compat userspace. Only the permission
 * fields and msg_qbytes are consumed; everything else is zeroed.
 */
static int copy_compat_msqid_from_user(struct msqid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_msqid64_ds __user *p = buf;
		if (get_compat_ipc64_perm(&out->msg_perm, &p->msg_perm))
			return -EFAULT;
		if (get_user(out->msg_qbytes, &p->msg_qbytes))
			return -EFAULT;
	} else {
		struct compat_msqid_ds __user *p = buf;
		if (get_compat_ipc_perm(&out->msg_perm, &p->msg_perm))
			return -EFAULT;
		if (get_user(out->msg_qbytes, &p->msg_qbytes))
			return -EFAULT;
	}
	return 0;
}
696 | |
/*
 * Copy queue status out to compat userspace in the requested layout.
 * The IPC_64 layout splits 64-bit times into low/high halves; the old
 * layout simply truncates them to 32 bits. Returns bytes-not-copied
 * (0 on success), matching copy_to_user().
 */
static int copy_compat_msqid_to_user(void __user *buf, struct msqid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_msqid64_ds v;
		memset(&v, 0, sizeof(v));	/* no padding leaks */
		to_compat_ipc64_perm(&v.msg_perm, &in->msg_perm);
		v.msg_stime	 = lower_32_bits(in->msg_stime);
		v.msg_stime_high = upper_32_bits(in->msg_stime);
		v.msg_rtime	 = lower_32_bits(in->msg_rtime);
		v.msg_rtime_high = upper_32_bits(in->msg_rtime);
		v.msg_ctime	 = lower_32_bits(in->msg_ctime);
		v.msg_ctime_high = upper_32_bits(in->msg_ctime);
		v.msg_cbytes = in->msg_cbytes;
		v.msg_qnum = in->msg_qnum;
		v.msg_qbytes = in->msg_qbytes;
		v.msg_lspid = in->msg_lspid;
		v.msg_lrpid = in->msg_lrpid;
		return copy_to_user(to: buf, from: &v, n: sizeof(v));
	} else {
		struct compat_msqid_ds v;
		memset(&v, 0, sizeof(v));	/* no padding leaks */
		to_compat_ipc_perm(&v.msg_perm, &in->msg_perm);
		v.msg_stime = in->msg_stime;	/* truncated to old_time32_t */
		v.msg_rtime = in->msg_rtime;
		v.msg_ctime = in->msg_ctime;
		v.msg_cbytes = in->msg_cbytes;
		v.msg_qnum = in->msg_qnum;
		v.msg_qbytes = in->msg_qbytes;
		v.msg_lspid = in->msg_lspid;
		v.msg_lrpid = in->msg_lrpid;
		return copy_to_user(to: buf, from: &v, n: sizeof(v));
	}
}
731 | |
/*
 * Compat msgctl() implementation; mirrors ksys_msgctl() but marshals
 * with the compat copy helpers. The IPC_64 flag bit is masked out of
 * @cmd only for the dispatch; handlers receive @cmd unmodified.
 */
static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int version)
{
	struct ipc_namespace *ns;
	int err;
	struct msqid64_ds msqid64;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case MSG_INFO: {
		struct msginfo msginfo;
		err = msgctl_info(ns, msqid, cmd, msginfo: &msginfo);
		if (err < 0)
			return err;
		if (copy_to_user(to: uptr, from: &msginfo, n: sizeof(struct msginfo)))
			err = -EFAULT;
		return err;
	}
	case IPC_STAT:
	case MSG_STAT:
	case MSG_STAT_ANY:
		err = msgctl_stat(ns, msqid, cmd, p: &msqid64);
		if (err < 0)
			return err;
		if (copy_compat_msqid_to_user(buf: uptr, in: &msqid64, version))
			err = -EFAULT;
		return err;
	case IPC_SET:
		if (copy_compat_msqid_from_user(out: &msqid64, buf: uptr, version))
			return -EFAULT;
		return msgctl_down(ns, msqid, cmd, perm: &msqid64.msg_perm, msg_qbytes: msqid64.msg_qbytes);
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, NULL, msg_qbytes: 0);
	default:
		return -EINVAL;
	}
}
773 | |
/* Compat msgctl(2) entry point; modern compat ABI uses IPC_64. */
COMPAT_SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, void __user *, uptr)
{
	return compat_ksys_msgctl(msqid, cmd, uptr, IPC_64);
}
778 | |
779 | #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
/* Legacy compat msgctl(): version is encoded in @cmd and stripped here. */
long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr)
{
	int version = compat_ipc_parse_version(cmd: &cmd);

	return compat_ksys_msgctl(msqid, cmd, uptr, version);
}
786 | |
/* Compat old_msgctl(2) entry point. */
COMPAT_SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, void __user *, uptr)
{
	return compat_ksys_old_msgctl(msqid, cmd, uptr);
}
791 | #endif |
792 | #endif |
793 | |
794 | static int testmsg(struct msg_msg *msg, long type, int mode) |
795 | { |
796 | switch (mode) { |
797 | case SEARCH_ANY: |
798 | case SEARCH_NUMBER: |
799 | return 1; |
800 | case SEARCH_LESSEQUAL: |
801 | if (msg->m_type <= type) |
802 | return 1; |
803 | break; |
804 | case SEARCH_EQUAL: |
805 | if (msg->m_type == type) |
806 | return 1; |
807 | break; |
808 | case SEARCH_NOTEQUAL: |
809 | if (msg->m_type != type) |
810 | return 1; |
811 | break; |
812 | } |
813 | return 0; |
814 | } |
815 | |
/*
 * Try to hand @msg directly to a sleeping receiver, bypassing the
 * message list. Returns 1 if the message was consumed, 0 if it must
 * be enqueued. Receivers whose buffer is too small get -E2BIG and are
 * woken anyway. Caller holds the queue lock; wakeups go to @wake_q.
 */
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
				 struct wake_q_head *wake_q)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, type: msr->r_msgtype, mode: msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq: &msq->q_perm, msg, target: msr->r_tsk,
					       type: msr->r_msgtype, mode: msr->r_mode)) {

			list_del(entry: &msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				/* message too large for this receiver's buffer */
				wake_q_add(head: wake_q, task: msr->r_tsk);

				/* See expunge_all regarding memory barrier */
				smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG));
			} else {
				ipc_update_pid(pos: &msq->q_lrpid, pid: task_pid(task: msr->r_tsk));
				msq->q_rtime = ktime_get_real_seconds();

				wake_q_add(head: wake_q, task: msr->r_tsk);

				/* See expunge_all regarding memory barrier */
				smp_store_release(&msr->r_msg, msg);
				return 1;
			}
		}
	}

	return 0;
}
847 | |
/*
 * Core of msgsnd(2): copy the message from userspace, then either hand
 * it directly to a sleeping receiver or append it to the queue. If the
 * queue is full and IPC_NOWAIT is not set, sleep until there is room,
 * a signal arrives, or the queue is removed.
 */
static long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;
	DEFINE_WAKE_Q(wake_q);

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	/* copy from userspace before taking any locks */
	msg = load_msg(src: mtext, len: msgsz);
	if (IS_ERR(ptr: msg))
		return PTR_ERR(ptr: msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, id: msqid);
	if (IS_ERR(ptr: msq)) {
		err = PTR_ERR(ptr: msq);
		goto out_unlock1;
	}

	ipc_lock_object(perm: &msq->q_perm);

	for (;;) {
		struct msg_sender s;

		/* permissions are rechecked on every wakeup iteration */
		err = -EACCES;
		if (ipcperms(ns, ipcp: &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(perm: &msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq: &msq->q_perm, msg, msqflg: msgflg);
		if (err)
			goto out_unlock0;

		if (msg_fits_inqueue(msq, msgsz))
			break;

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, mss: &s, msgsz);

		/* hold the queue across the sleep so it cannot be freed */
		if (!ipc_rcu_getref(ptr: &msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(perm: &msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(perm: &msq->q_perm);

		ipc_rcu_putref(ptr: &msq->q_perm, func: msg_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(perm: &msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}
		ss_del(mss: &s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}

	}

	ipc_update_pid(pos: &msq->q_lspid, pid: task_tgid(current));
	msq->q_stime = ktime_get_real_seconds();

	if (!pipelined_send(msq, msg, wake_q: &wake_q)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(new: &msg->m_list, head: &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		percpu_counter_add_local(fbc: &ns->percpu_msg_bytes, amount: msgsz);
		percpu_counter_add_local(fbc: &ns->percpu_msg_hdrs, amount: 1);
	}

	/* ownership transferred (enqueued or consumed): don't free below */
	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(perm: &msq->q_perm);
	wake_up_q(head: &wake_q);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}
960 | |
/*
 * msgsnd() wrapper: read the message type from the user buffer, then
 * pass the text portion to do_msgsnd().
 */
long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
		 int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, mtext: msgp->mtext, msgsz, msgflg);
}
970 | |
/* msgsnd(2) entry point; thin wrapper around ksys_msgsnd(). */
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	return ksys_msgsnd(msqid, msgp, msgsz, msgflg);
}
976 | |
977 | #ifdef CONFIG_COMPAT |
978 | |
/* 32-bit msgbuf layout: compat_long_t type followed by the text. */
struct compat_msgbuf {
	compat_long_t mtype;
	char mtext[1];		/* flexible payload (historic [1] idiom) */
};
983 | |
/*
 * Compat msgsnd(): read the 32-bit mtype, then forward the text to
 * do_msgsnd(). msgsz is sign-extended via ssize_t to preserve the
 * negative-size check in do_msgsnd().
 */
long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
		       compat_ssize_t msgsz, int msgflg)
{
	struct compat_msgbuf __user *up = compat_ptr(uptr: msgp);
	compat_long_t mtype;

	if (get_user(mtype, &up->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, mtext: up->mtext, msgsz: (ssize_t)msgsz, msgflg);
}
994 | |
/* Compat msgsnd(2) entry point. */
COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp,
		       compat_ssize_t, msgsz, int, msgflg)
{
	return compat_ksys_msgsnd(msqid, msgp, msgsz, msgflg);
}
1000 | #endif |
1001 | |
1002 | static inline int convert_mode(long *msgtyp, int msgflg) |
1003 | { |
1004 | if (msgflg & MSG_COPY) |
1005 | return SEARCH_NUMBER; |
1006 | /* |
1007 | * find message of correct type. |
1008 | * msgtyp = 0 => get first. |
1009 | * msgtyp > 0 => get first message of matching type. |
1010 | * msgtyp < 0 => get message with least type must be < abs(msgtype). |
1011 | */ |
1012 | if (*msgtyp == 0) |
1013 | return SEARCH_ANY; |
1014 | if (*msgtyp < 0) { |
1015 | if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */ |
1016 | *msgtyp = LONG_MAX; |
1017 | else |
1018 | *msgtyp = -*msgtyp; |
1019 | return SEARCH_LESSEQUAL; |
1020 | } |
1021 | if (msgflg & MSG_EXCEPT) |
1022 | return SEARCH_NOTEQUAL; |
1023 | return SEARCH_EQUAL; |
1024 | } |
1025 | |
1026 | static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) |
1027 | { |
1028 | struct msgbuf __user *msgp = dest; |
1029 | size_t msgsz; |
1030 | |
1031 | if (put_user(msg->m_type, &msgp->mtype)) |
1032 | return -EFAULT; |
1033 | |
1034 | msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; |
1035 | if (store_msg(dest: msgp->mtext, msg, len: msgsz)) |
1036 | return -EFAULT; |
1037 | return msgsz; |
1038 | } |
1039 | |
1040 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1041 | /* |
1042 | * This function creates new kernel message structure, large enough to store |
1043 | * bufsz message bytes. |
1044 | */ |
1045 | static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) |
1046 | { |
1047 | struct msg_msg *copy; |
1048 | |
1049 | /* |
1050 | * Create dummy message to copy real message to. |
1051 | */ |
1052 | copy = load_msg(src: buf, len: bufsz); |
1053 | if (!IS_ERR(ptr: copy)) |
1054 | copy->m_ts = bufsz; |
1055 | return copy; |
1056 | } |
1057 | |
/* Release the MSG_COPY scratch message, if one was allocated. */
static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
1063 | #else |
/* !CONFIG_CHECKPOINT_RESTORE: MSG_COPY is not supported. */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}
1068 | |
/* Nothing to free when MSG_COPY support is compiled out. */
static inline void free_copy(struct msg_msg *copy)
{
}
1072 | #endif |
1073 | |
1074 | static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) |
1075 | { |
1076 | struct msg_msg *msg, *found = NULL; |
1077 | long count = 0; |
1078 | |
1079 | list_for_each_entry(msg, &msq->q_messages, m_list) { |
1080 | if (testmsg(msg, type: *msgtyp, mode) && |
1081 | !security_msg_queue_msgrcv(msq: &msq->q_perm, msg, current, |
1082 | type: *msgtyp, mode)) { |
1083 | if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { |
1084 | *msgtyp = msg->m_type - 1; |
1085 | found = msg; |
1086 | } else if (mode == SEARCH_NUMBER) { |
1087 | if (*msgtyp == count) |
1088 | return msg; |
1089 | } else |
1090 | return msg; |
1091 | count++; |
1092 | } |
1093 | } |
1094 | |
1095 | return found ?: ERR_PTR(error: -EAGAIN); |
1096 | } |
1097 | |
1098 | static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, |
1099 | long (*msg_handler)(void __user *, struct msg_msg *, size_t)) |
1100 | { |
1101 | int mode; |
1102 | struct msg_queue *msq; |
1103 | struct ipc_namespace *ns; |
1104 | struct msg_msg *msg, *copy = NULL; |
1105 | DEFINE_WAKE_Q(wake_q); |
1106 | |
1107 | ns = current->nsproxy->ipc_ns; |
1108 | |
1109 | if (msqid < 0 || (long) bufsz < 0) |
1110 | return -EINVAL; |
1111 | |
1112 | if (msgflg & MSG_COPY) { |
1113 | if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) |
1114 | return -EINVAL; |
1115 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); |
1116 | if (IS_ERR(ptr: copy)) |
1117 | return PTR_ERR(ptr: copy); |
1118 | } |
1119 | mode = convert_mode(msgtyp: &msgtyp, msgflg); |
1120 | |
1121 | rcu_read_lock(); |
1122 | msq = msq_obtain_object_check(ns, id: msqid); |
1123 | if (IS_ERR(ptr: msq)) { |
1124 | rcu_read_unlock(); |
1125 | free_copy(copy); |
1126 | return PTR_ERR(ptr: msq); |
1127 | } |
1128 | |
1129 | for (;;) { |
1130 | struct msg_receiver msr_d; |
1131 | |
1132 | msg = ERR_PTR(error: -EACCES); |
1133 | if (ipcperms(ns, ipcp: &msq->q_perm, S_IRUGO)) |
1134 | goto out_unlock1; |
1135 | |
1136 | ipc_lock_object(perm: &msq->q_perm); |
1137 | |
1138 | /* raced with RMID? */ |
1139 | if (!ipc_valid_object(perm: &msq->q_perm)) { |
1140 | msg = ERR_PTR(error: -EIDRM); |
1141 | goto out_unlock0; |
1142 | } |
1143 | |
1144 | msg = find_msg(msq, msgtyp: &msgtyp, mode); |
1145 | if (!IS_ERR(ptr: msg)) { |
1146 | /* |
1147 | * Found a suitable message. |
1148 | * Unlink it from the queue. |
1149 | */ |
1150 | if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { |
1151 | msg = ERR_PTR(error: -E2BIG); |
1152 | goto out_unlock0; |
1153 | } |
1154 | /* |
1155 | * If we are copying, then do not unlink message and do |
1156 | * not update queue parameters. |
1157 | */ |
1158 | if (msgflg & MSG_COPY) { |
1159 | msg = copy_msg(src: msg, dst: copy); |
1160 | goto out_unlock0; |
1161 | } |
1162 | |
1163 | list_del(entry: &msg->m_list); |
1164 | msq->q_qnum--; |
1165 | msq->q_rtime = ktime_get_real_seconds(); |
1166 | ipc_update_pid(pos: &msq->q_lrpid, pid: task_tgid(current)); |
1167 | msq->q_cbytes -= msg->m_ts; |
1168 | percpu_counter_sub_local(fbc: &ns->percpu_msg_bytes, amount: msg->m_ts); |
1169 | percpu_counter_sub_local(fbc: &ns->percpu_msg_hdrs, amount: 1); |
1170 | ss_wakeup(msq, wake_q: &wake_q, kill: false); |
1171 | |
1172 | goto out_unlock0; |
1173 | } |
1174 | |
1175 | /* No message waiting. Wait for a message */ |
1176 | if (msgflg & IPC_NOWAIT) { |
1177 | msg = ERR_PTR(error: -ENOMSG); |
1178 | goto out_unlock0; |
1179 | } |
1180 | |
1181 | list_add_tail(new: &msr_d.r_list, head: &msq->q_receivers); |
1182 | msr_d.r_tsk = current; |
1183 | msr_d.r_msgtype = msgtyp; |
1184 | msr_d.r_mode = mode; |
1185 | if (msgflg & MSG_NOERROR) |
1186 | msr_d.r_maxsize = INT_MAX; |
1187 | else |
1188 | msr_d.r_maxsize = bufsz; |
1189 | |
1190 | /* memory barrier not require due to ipc_lock_object() */ |
1191 | WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN)); |
1192 | |
1193 | /* memory barrier not required, we own ipc_lock_object() */ |
1194 | __set_current_state(TASK_INTERRUPTIBLE); |
1195 | |
1196 | ipc_unlock_object(perm: &msq->q_perm); |
1197 | rcu_read_unlock(); |
1198 | schedule(); |
1199 | |
1200 | /* |
1201 | * Lockless receive, part 1: |
1202 | * We don't hold a reference to the queue and getting a |
1203 | * reference would defeat the idea of a lockless operation, |
1204 | * thus the code relies on rcu to guarantee the existence of |
1205 | * msq: |
1206 | * Prior to destruction, expunge_all(-EIRDM) changes r_msg. |
1207 | * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. |
1208 | */ |
1209 | rcu_read_lock(); |
1210 | |
1211 | /* |
1212 | * Lockless receive, part 2: |
1213 | * The work in pipelined_send() and expunge_all(): |
1214 | * - Set pointer to message |
1215 | * - Queue the receiver task for later wakeup |
1216 | * - Wake up the process after the lock is dropped. |
1217 | * |
1218 | * Should the process wake up before this wakeup (due to a |
1219 | * signal) it will either see the message and continue ... |
1220 | */ |
1221 | msg = READ_ONCE(msr_d.r_msg); |
1222 | if (msg != ERR_PTR(error: -EAGAIN)) { |
1223 | /* see MSG_BARRIER for purpose/pairing */ |
1224 | smp_acquire__after_ctrl_dep(); |
1225 | |
1226 | goto out_unlock1; |
1227 | } |
1228 | |
1229 | /* |
1230 | * ... or see -EAGAIN, acquire the lock to check the message |
1231 | * again. |
1232 | */ |
1233 | ipc_lock_object(perm: &msq->q_perm); |
1234 | |
1235 | msg = READ_ONCE(msr_d.r_msg); |
1236 | if (msg != ERR_PTR(error: -EAGAIN)) |
1237 | goto out_unlock0; |
1238 | |
1239 | list_del(entry: &msr_d.r_list); |
1240 | if (signal_pending(current)) { |
1241 | msg = ERR_PTR(error: -ERESTARTNOHAND); |
1242 | goto out_unlock0; |
1243 | } |
1244 | |
1245 | ipc_unlock_object(perm: &msq->q_perm); |
1246 | } |
1247 | |
1248 | out_unlock0: |
1249 | ipc_unlock_object(perm: &msq->q_perm); |
1250 | wake_up_q(head: &wake_q); |
1251 | out_unlock1: |
1252 | rcu_read_unlock(); |
1253 | if (IS_ERR(ptr: msg)) { |
1254 | free_copy(copy); |
1255 | return PTR_ERR(ptr: msg); |
1256 | } |
1257 | |
1258 | bufsz = msg_handler(buf, msg, bufsz); |
1259 | free_msg(msg); |
1260 | |
1261 | return bufsz; |
1262 | } |
1263 | |
1264 | long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, |
1265 | long msgtyp, int msgflg) |
1266 | { |
1267 | return do_msgrcv(msqid, buf: msgp, bufsz: msgsz, msgtyp, msgflg, msg_handler: do_msg_fill); |
1268 | } |
1269 | |
/* msgrcv(2): thin syscall wrapper, all work is done in ksys_msgrcv(). */
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg);
}
1275 | |
1276 | #ifdef CONFIG_COMPAT |
1277 | static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) |
1278 | { |
1279 | struct compat_msgbuf __user *msgp = dest; |
1280 | size_t msgsz; |
1281 | |
1282 | if (put_user(msg->m_type, &msgp->mtype)) |
1283 | return -EFAULT; |
1284 | |
1285 | msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; |
1286 | if (store_msg(dest: msgp->mtext, msg, len: msgsz)) |
1287 | return -EFAULT; |
1288 | return msgsz; |
1289 | } |
1290 | |
1291 | long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, |
1292 | compat_long_t msgtyp, int msgflg) |
1293 | { |
1294 | return do_msgrcv(msqid, buf: compat_ptr(uptr: msgp), bufsz: (ssize_t)msgsz, msgtyp: (long)msgtyp, |
1295 | msgflg, msg_handler: compat_do_msg_fill); |
1296 | } |
1297 | |
/* 32-bit msgrcv(2): thin wrapper around compat_ksys_msgrcv(). */
COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp,
		       compat_ssize_t, msgsz, compat_long_t, msgtyp,
		       int, msgflg)
{
	return compat_ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg);
}
1304 | #endif |
1305 | |
1306 | int msg_init_ns(struct ipc_namespace *ns) |
1307 | { |
1308 | int ret; |
1309 | |
1310 | ns->msg_ctlmax = MSGMAX; |
1311 | ns->msg_ctlmnb = MSGMNB; |
1312 | ns->msg_ctlmni = MSGMNI; |
1313 | |
1314 | ret = percpu_counter_init(&ns->percpu_msg_bytes, 0, GFP_KERNEL); |
1315 | if (ret) |
1316 | goto fail_msg_bytes; |
1317 | ret = percpu_counter_init(&ns->percpu_msg_hdrs, 0, GFP_KERNEL); |
1318 | if (ret) |
1319 | goto fail_msg_hdrs; |
1320 | ipc_init_ids(ids: &ns->ids[IPC_MSG_IDS]); |
1321 | return 0; |
1322 | |
1323 | fail_msg_hdrs: |
1324 | percpu_counter_destroy(fbc: &ns->percpu_msg_bytes); |
1325 | fail_msg_bytes: |
1326 | return ret; |
1327 | } |
1328 | |
1329 | #ifdef CONFIG_IPC_NS |
1330 | void msg_exit_ns(struct ipc_namespace *ns) |
1331 | { |
1332 | free_ipcs(ns, ids: &msg_ids(ns), free: freeque); |
1333 | idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); |
1334 | rhashtable_destroy(ht: &ns->ids[IPC_MSG_IDS].key_ht); |
1335 | percpu_counter_destroy(fbc: &ns->percpu_msg_bytes); |
1336 | percpu_counter_destroy(fbc: &ns->percpu_msg_hdrs); |
1337 | } |
1338 | #endif |
1339 | |
1340 | #ifdef CONFIG_PROC_FS |
1341 | static int sysvipc_msg_proc_show(struct seq_file *s, void *it) |
1342 | { |
1343 | struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); |
1344 | struct user_namespace *user_ns = seq_user_ns(seq: s); |
1345 | struct kern_ipc_perm *ipcp = it; |
1346 | struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); |
1347 | |
1348 | seq_printf(m: s, |
1349 | fmt: "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10llu %10llu %10llu\n" , |
1350 | msq->q_perm.key, |
1351 | msq->q_perm.id, |
1352 | msq->q_perm.mode, |
1353 | msq->q_cbytes, |
1354 | msq->q_qnum, |
1355 | pid_nr_ns(pid: msq->q_lspid, ns: pid_ns), |
1356 | pid_nr_ns(pid: msq->q_lrpid, ns: pid_ns), |
1357 | from_kuid_munged(to: user_ns, uid: msq->q_perm.uid), |
1358 | from_kgid_munged(to: user_ns, gid: msq->q_perm.gid), |
1359 | from_kuid_munged(to: user_ns, uid: msq->q_perm.cuid), |
1360 | from_kgid_munged(to: user_ns, gid: msq->q_perm.cgid), |
1361 | msq->q_stime, |
1362 | msq->q_rtime, |
1363 | msq->q_ctime); |
1364 | |
1365 | return 0; |
1366 | } |
1367 | #endif |
1368 | |
1369 | void __init msg_init(void) |
1370 | { |
1371 | msg_init_ns(ns: &init_ipc_ns); |
1372 | |
1373 | ipc_init_proc_interface(path: "sysvipc/msg" , |
1374 | header: " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n" , |
1375 | IPC_MSG_IDS, show: sysvipc_msg_proc_show); |
1376 | } |
1377 | |