1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/ipc/msg.c
4 * Copyright (C) 1992 Krishna Balasubramanian
5 *
6 * Removed all the remaining kerneld mess
7 * Catch the -EFAULT stuff properly
8 * Use GFP_KERNEL for messages as in 1.2
9 * Fixed up the unchecked user space derefs
10 * Copyright (C) 1998 Alan Cox & Andi Kleen
11 *
12 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
13 *
14 * mostly rewritten, threaded and wake-one semantics added
15 * MSGMAX limit removed, sysctl's added
16 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
17 *
18 * support for audit of ipc object properties and permission changes
19 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
20 *
21 * namespaces support
22 * OpenVZ, SWsoft Inc.
23 * Pavel Emelianov <xemul@openvz.org>
24 */
25
26 #include <linux/capability.h>
27 #include <linux/msg.h>
28 #include <linux/spinlock.h>
29 #include <linux/init.h>
30 #include <linux/mm.h>
31 #include <linux/proc_fs.h>
32 #include <linux/list.h>
33 #include <linux/security.h>
34 #include <linux/sched/wake_q.h>
35 #include <linux/syscalls.h>
36 #include <linux/audit.h>
37 #include <linux/seq_file.h>
38 #include <linux/rwsem.h>
39 #include <linux/nsproxy.h>
40 #include <linux/ipc_namespace.h>
41 #include <linux/rhashtable.h>
42
43 #include <asm/current.h>
44 #include <linux/uaccess.h>
45 #include "util.h"
46
47 /* one msq_queue structure for each present queue on the system */
48 struct msg_queue {
49 struct kern_ipc_perm q_perm;
50 time64_t q_stime; /* last msgsnd time */
51 time64_t q_rtime; /* last msgrcv time */
52 time64_t q_ctime; /* last change time */
53 unsigned long q_cbytes; /* current number of bytes on queue */
54 unsigned long q_qnum; /* number of messages in queue */
55 unsigned long q_qbytes; /* max number of bytes on queue */
56 struct pid *q_lspid; /* pid of last msgsnd */
57 struct pid *q_lrpid; /* last receive pid */
58
59 struct list_head q_messages;
60 struct list_head q_receivers;
61 struct list_head q_senders;
62 } __randomize_layout;
63
64 /*
65 * MSG_BARRIER Locking:
66 *
67 * Similar to the optimization used in ipc/mqueue.c, one syscall return path
68 * does not acquire any locks when it sees that a message exists in
69 * msg_receiver.r_msg. Therefore r_msg is set using smp_store_release()
70 * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition,
71 * wake_q_add_safe() is used. See ipc/mqueue.c for more details
72 */
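/*
 * Illustration only (not part of the implementation): the pairing above is
 *
 *	waker (pipelined_send/expunge_all)	sleeper (do_msgrcv)
 *	smp_store_release(&msr->r_msg, msg)	msg = READ_ONCE(msr_d.r_msg)
 *	wake_q_add_safe(wake_q, r_tsk)		smp_acquire__after_ctrl_dep()
 *
 * The store-release makes the message contents visible before r_msg itself
 * changes, so the lockless return path never reads a half-initialised message.
 */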
73
74 /* one msg_receiver structure for each sleeping receiver */
75 struct msg_receiver {
76 struct list_head r_list;
77 struct task_struct *r_tsk;
78
79 int r_mode;
80 long r_msgtype;
81 long r_maxsize;
82
83 struct msg_msg *r_msg;
84 };
85
86 /* one msg_sender for each sleeping sender */
87 struct msg_sender {
88 struct list_head list;
89 struct task_struct *tsk;
90 size_t msgsz;
91 };
92
93 #define SEARCH_ANY 1
94 #define SEARCH_EQUAL 2
95 #define SEARCH_NOTEQUAL 3
96 #define SEARCH_LESSEQUAL 4
97 #define SEARCH_NUMBER 5
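/*
 * These modes are derived from the msgtyp/msgflg arguments in convert_mode():
 * msgtyp == 0 selects SEARCH_ANY, msgtyp > 0 selects SEARCH_EQUAL (or
 * SEARCH_NOTEQUAL with MSG_EXCEPT), msgtyp < 0 selects SEARCH_LESSEQUAL, and
 * MSG_COPY selects SEARCH_NUMBER (msgtyp is then an index, not a type).
 */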
98
99 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
100
101 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
102 {
103 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);
104
105 if (IS_ERR(ipcp)) {
106 return ERR_CAST(ipcp);
107 }
108
109 return container_of(ipcp, struct msg_queue, q_perm);
110 }
111
112 static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, int id)
113 {
114 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);
115
116 if (IS_ERR(ipcp)) {
117 return ERR_CAST(ipcp);
118 }
119
120 return container_of(ipcp, struct msg_queue, q_perm);
121 }
122
123 static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
124 {
125 ipc_rmid(&msg_ids(ns), &s->q_perm);
126 }
127
128 static void msg_rcu_free(struct rcu_head *head)
129 {
130 struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
131 struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
132
133 security_msg_queue_free(&msq->q_perm);
134 kvfree(msq);
135 }
136
137 /**
138 * newque - Create a new msg queue
139 * @ns: namespace
140 * @params: ptr to the structure that contains the key and msgflg
141 *
142 * Called with msg_ids.rwsem held (writer)
143 */
144 static int newque(struct ipc_namespace *ns, struct ipc_params *params)
145 {
146 struct msg_queue *msq;
147 int retval;
148 key_t key = params->key;
149 int msgflg = params->flg;
150
151 msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
152 if (unlikely(!msq)) {
153 return -ENOMEM;
154 }
155
156 msq->q_perm.mode = msgflg & S_IRWXUGO;
157 msq->q_perm.key = key;
158
159 msq->q_perm.security = NULL;
160 retval = security_msg_queue_alloc(&msq->q_perm);
161 if (retval) {
162 kvfree(msq);
163 return retval;
164 }
165
166 msq->q_stime = msq->q_rtime = 0;
167 msq->q_ctime = ktime_get_real_seconds();
168 msq->q_cbytes = msq->q_qnum = 0;
169 msq->q_qbytes = ns->msg_ctlmnb;
170 msq->q_lspid = msq->q_lrpid = NULL;
171 INIT_LIST_HEAD(&msq->q_messages);
172 INIT_LIST_HEAD(&msq->q_receivers);
173 INIT_LIST_HEAD(&msq->q_senders);
174
175 /* ipc_addid() locks msq upon success. */
176 retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
177 if (retval < 0) {
178 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
179 return retval;
180 }
181
182 ipc_unlock_object(&msq->q_perm);
183 rcu_read_unlock();
184
185 return msq->q_perm.id;
186 }
187
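/*
 * A message fits if it neither pushes the queue over its byte limit
 * (q_qbytes) nor over the implicit limit on the number of messages
 * (also q_qbytes, i.e. at most one message per allowed byte).
 */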
188 static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
189 {
190 return msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes;
191 }
192
193 static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss, size_t msgsz)
194 {
195 mss->tsk = current;
196 mss->msgsz = msgsz;
197 /*
198 * No memory barrier required: we did ipc_lock_object(),
199 * and the waker obtains that lock before calling wake_q_add().
200 */
201 __set_current_state(TASK_INTERRUPTIBLE);
202 list_add_tail(&mss->list, &msq->q_senders);
203 }
204
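/*
 * ss_wakeup(..., kill=true) marks entries by clearing list.next rather than
 * unlinking them, so ss_del() must tolerate entries that are no longer on a
 * valid list.
 */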
205 static inline void ss_del(struct msg_sender *mss)
206 {
207 if (mss->list.next) {
208 list_del(&mss->list);
209 }
210 }
211
212 static void ss_wakeup(struct msg_queue *msq, struct wake_q_head *wake_q, bool kill)
213 {
214 struct msg_sender *mss, *t;
215 struct task_struct *stop_tsk = NULL;
216 struct list_head *h = &msq->q_senders;
217
218 list_for_each_entry_safe(mss, t, h, list) {
220 if (kill) {
221 mss->list.next = NULL;
222 } else if (stop_tsk == mss->tsk) {
223 break;
224 } else if (!msg_fits_inqueue(msq, mss->msgsz)) {
225 if (!stop_tsk) {
226 stop_tsk = mss->tsk;
227 }
228
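/*
 * Requeue the sender that does not fit and remember it as stop_tsk:
 * once the walk reaches it again, every remaining entry has already
 * been reconsidered, so there is nothing left to wake.
 */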
229 list_move_tail(&mss->list, &msq->q_senders);
230 continue;
231 }
232
233 wake_q_add(wake_q, mss->tsk);
234 }
235 }
236
237 static void expunge_all(struct msg_queue *msq, int res, struct wake_q_head *wake_q)
238 {
239 struct msg_receiver *msr, *t;
240
241 list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
243 struct task_struct *r_tsk;
244
245 r_tsk = get_task_struct(msr->r_tsk);
246
247 /* see MSG_BARRIER for purpose/pairing */
248 smp_store_release(&msr->r_msg, ERR_PTR(res));
249 wake_q_add_safe(wake_q, r_tsk);
250 }
251 }
252
253 /*
254 * freeque() wakes up waiters on the sender and receiver waiting queue,
255 * removes the message queue from message queue ID IDR, and cleans up all the
256 * messages associated with this queue.
257 *
258 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
259 * before freeque() is called. msg_ids.rwsem remains locked on exit.
260 */
261 static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) __releases(RCU) __releases(&msq->q_perm)
262 {
263 struct msg_msg *msg, *t;
264 struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
265 DEFINE_WAKE_Q(wake_q);
266
267 expunge_all(msq, -EIDRM, &wake_q);
268 ss_wakeup(msq, &wake_q, true);
269 msg_rmid(ns, msq);
270 ipc_unlock_object(&msq->q_perm);
271 wake_up_q(&wake_q);
272 rcu_read_unlock();
273
274 list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
276 atomic_dec(&ns->msg_hdrs);
277 free_msg(msg);
278 }
279 atomic_sub(msq->q_cbytes, &ns->msg_bytes);
280 ipc_update_pid(&msq->q_lspid, NULL);
281 ipc_update_pid(&msq->q_lrpid, NULL);
282 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
283 }
284
285 long ksys_msgget(key_t key, int msgflg)
286 {
287 struct ipc_namespace *ns;
288 static const struct ipc_ops msg_ops = {
289 .getnew = newque,
290 .associate = security_msg_queue_associate,
291 };
292 struct ipc_params msg_params;
293
294 ns = current->nsproxy->ipc_ns;
295
296 msg_params.key = key;
297 msg_params.flg = msgflg;
298
299 return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
300 }
301
302 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
303 {
304 return ksys_msgget(key, msgflg);
305 }
306
307 static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
308 {
309 switch (version) {
310 case IPC_64:
311 return copy_to_user(buf, in, sizeof(*in));
312 case IPC_OLD: {
313 struct msqid_ds out;
314
315 memset(&out, 0, sizeof(out));
316
317 ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
318
319 out.msg_stime = in->msg_stime;
320 out.msg_rtime = in->msg_rtime;
321 out.msg_ctime = in->msg_ctime;
322
323 if (in->msg_cbytes > USHRT_MAX) {
324 out.msg_cbytes = USHRT_MAX;
325 } else {
326 out.msg_cbytes = in->msg_cbytes;
327 }
328 out.msg_lcbytes = in->msg_cbytes;
329
330 if (in->msg_qnum > USHRT_MAX) {
331 out.msg_qnum = USHRT_MAX;
332 } else {
333 out.msg_qnum = in->msg_qnum;
334 }
335
336 if (in->msg_qbytes > USHRT_MAX) {
337 out.msg_qbytes = USHRT_MAX;
338 } else {
339 out.msg_qbytes = in->msg_qbytes;
340 }
341 out.msg_lqbytes = in->msg_qbytes;
342
343 out.msg_lspid = in->msg_lspid;
344 out.msg_lrpid = in->msg_lrpid;
345
346 return copy_to_user(buf, &out, sizeof(out));
347 }
348 default:
349 return -EINVAL;
350 }
351 }
352
353 static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
354 {
355 switch (version) {
356 case IPC_64:
357 if (copy_from_user(out, buf, sizeof(*out))) {
358 return -EFAULT;
359 }
360 return 0;
361 case IPC_OLD: {
362 struct msqid_ds tbuf_old;
363
364 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) {
365 return -EFAULT;
366 }
367
368 out->msg_perm.uid = tbuf_old.msg_perm.uid;
369 out->msg_perm.gid = tbuf_old.msg_perm.gid;
370 out->msg_perm.mode = tbuf_old.msg_perm.mode;
371
372 if (tbuf_old.msg_qbytes == 0) {
373 out->msg_qbytes = tbuf_old.msg_lqbytes;
374 } else {
375 out->msg_qbytes = tbuf_old.msg_qbytes;
376 }
377
378 return 0;
379 }
380 default:
381 return -EINVAL;
382 }
383 }
384
385 /*
386 * This function handles some msgctl commands which require the rwsem
387 * to be held in write mode.
388 * NOTE: no locks must be held, the rwsem is taken inside this function.
389 */
390 static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, struct ipc64_perm *perm, int msg_qbytes)
391 {
392 struct kern_ipc_perm *ipcp;
393 struct msg_queue *msq;
394 int err;
395
396 down_write(&msg_ids(ns).rwsem);
397 rcu_read_lock();
398
399 ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd, perm, msg_qbytes);
400 if (IS_ERR(ipcp)) {
401 err = PTR_ERR(ipcp);
402 goto out_unlock1;
403 }
404
405 msq = container_of(ipcp, struct msg_queue, q_perm);
406
407 err = security_msg_queue_msgctl(&msq->q_perm, cmd);
408 if (err) {
409 goto out_unlock1;
410 }
411
412 switch (cmd) {
413 case IPC_RMID:
414 ipc_lock_object(&msq->q_perm);
415 /* freeque unlocks the ipc object and rcu */
416 freeque(ns, ipcp);
417 goto out_up;
418 case IPC_SET: {
419 DEFINE_WAKE_Q(wake_q);
420
421 if (msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) {
422 err = -EPERM;
423 goto out_unlock1;
424 }
425
426 ipc_lock_object(&msq->q_perm);
427 err = ipc_update_perm(perm, ipcp);
428 if (err) {
429 goto out_unlock0;
430 }
431
432 msq->q_qbytes = msg_qbytes;
433
434 msq->q_ctime = ktime_get_real_seconds();
435 /*
436 * Sleeping receivers might be excluded by
437 * stricter permissions.
438 */
439 expunge_all(msq, -EAGAIN, &wake_q);
440 /*
441 * Sleeping senders might be able to send
442 * due to a larger queue size.
443 */
444 ss_wakeup(msq, &wake_q, false);
445 ipc_unlock_object(&msq->q_perm);
446 wake_up_q(&wake_q);
447
448 goto out_unlock1;
449 }
450 default:
451 err = -EINVAL;
452 goto out_unlock1;
453 }
454
455 out_unlock0:
456 ipc_unlock_object(&msq->q_perm);
457 out_unlock1:
458 rcu_read_unlock();
459 out_up:
460 up_write(&msg_ids(ns).rwsem);
461 return err;
462 }
463
464 static int msgctl_info(struct ipc_namespace *ns, int msqid, int cmd, struct msginfo *msginfo)
465 {
466 int err;
467 int max_idx;
468
469 /*
470 * We must not return kernel stack data:
471 * due to padding, it is not enough
472 * to set all member fields.
473 */
474 err = security_msg_queue_msgctl(NULL, cmd);
475 if (err) {
476 return err;
477 }
478
479 memset(msginfo, 0, sizeof(*msginfo));
480 msginfo->msgmni = ns->msg_ctlmni;
481 msginfo->msgmax = ns->msg_ctlmax;
482 msginfo->msgmnb = ns->msg_ctlmnb;
483 msginfo->msgssz = MSGSSZ;
484 msginfo->msgseg = MSGSEG;
485 down_read(&msg_ids(ns).rwsem);
486 if (cmd == MSG_INFO) {
487 msginfo->msgpool = msg_ids(ns).in_use;
488 msginfo->msgmap = atomic_read(&ns->msg_hdrs);
489 msginfo->msgtql = atomic_read(&ns->msg_bytes);
490 } else {
491 msginfo->msgmap = MSGMAP;
492 msginfo->msgpool = MSGPOOL;
493 msginfo->msgtql = MSGTQL;
494 }
495 max_idx = ipc_get_maxidx(&msg_ids(ns));
496 up_read(&msg_ids(ns).rwsem);
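/*
 * IPC_INFO/MSG_INFO return the highest in-use index so that userspace
 * can iterate over all queues with MSG_STAT.
 */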
497 return (max_idx < 0) ? 0 : max_idx;
498 }
499
500 static int msgctl_stat(struct ipc_namespace *ns, int msqid, int cmd, struct msqid64_ds *p)
501 {
502 struct msg_queue *msq;
503 int err;
504
505 memset(p, 0, sizeof(*p));
506
507 rcu_read_lock();
508 if (cmd == MSG_STAT || cmd == MSG_STAT_ANY) {
509 msq = msq_obtain_object(ns, msqid);
510 if (IS_ERR(msq)) {
511 err = PTR_ERR(msq);
512 goto out_unlock;
513 }
514 } else { /* IPC_STAT */
515 msq = msq_obtain_object_check(ns, msqid);
516 if (IS_ERR(msq)) {
517 err = PTR_ERR(msq);
518 goto out_unlock;
519 }
520 }
521
522 /* see comment for SHM_STAT_ANY */
523 if (cmd == MSG_STAT_ANY) {
524 audit_ipc_obj(&msq->q_perm);
525 } else {
526 err = -EACCES;
527 if (ipcperms(ns, &msq->q_perm, S_IRUGO)) {
528 goto out_unlock;
529 }
530 }
531
532 err = security_msg_queue_msgctl(&msq->q_perm, cmd);
533 if (err) {
534 goto out_unlock;
535 }
536
537 ipc_lock_object(&msq->q_perm);
538
539 if (!ipc_valid_object(&msq->q_perm)) {
540 ipc_unlock_object(&msq->q_perm);
541 err = -EIDRM;
542 goto out_unlock;
543 }
544
545 kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm);
546 p->msg_stime = msq->q_stime;
547 p->msg_rtime = msq->q_rtime;
548 p->msg_ctime = msq->q_ctime;
549 #ifndef CONFIG_64BIT
550 p->msg_stime_high = msq->q_stime >> 32;
551 p->msg_rtime_high = msq->q_rtime >> 32;
552 p->msg_ctime_high = msq->q_ctime >> 32;
553 #endif
554 p->msg_cbytes = msq->q_cbytes;
555 p->msg_qnum = msq->q_qnum;
556 p->msg_qbytes = msq->q_qbytes;
557 p->msg_lspid = pid_vnr(msq->q_lspid);
558 p->msg_lrpid = pid_vnr(msq->q_lrpid);
559
560 if (cmd == IPC_STAT) {
561 /*
562 * As defined in SUS:
563 * Return 0 on success
564 */
565 err = 0;
566 } else {
567 /*
568 * MSG_STAT and MSG_STAT_ANY (both Linux specific)
569 * Return the full id, including the sequence number
570 */
571 err = msq->q_perm.id;
572 }
573
574 ipc_unlock_object(&msq->q_perm);
575 out_unlock:
576 rcu_read_unlock();
577 return err;
578 }
579
580 static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int version)
581 {
582 struct ipc_namespace *ns;
583 struct msqid64_ds msqid64;
584 int err;
585
586 if (msqid < 0 || cmd < 0) {
587 return -EINVAL;
588 }
589
590 ns = current->nsproxy->ipc_ns;
591
592 switch (cmd) {
593 case IPC_INFO:
594 case MSG_INFO: {
595 struct msginfo msginfo;
596 err = msgctl_info(ns, msqid, cmd, &msginfo);
597 if (err < 0) {
598 return err;
599 }
600 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) {
601 err = -EFAULT;
602 }
603 return err;
604 }
605 case MSG_STAT: /* msqid is an index rather than a msg queue id */
606 case MSG_STAT_ANY:
607 case IPC_STAT:
608 err = msgctl_stat(ns, msqid, cmd, &msqid64);
609 if (err < 0) {
610 return err;
611 }
612 if (copy_msqid_to_user(buf, &msqid64, version)) {
613 err = -EFAULT;
614 }
615 return err;
616 case IPC_SET:
617 if (copy_msqid_from_user(&msqid64, buf, version)) {
618 return -EFAULT;
619 }
620 return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes);
621 case IPC_RMID:
622 return msgctl_down(ns, msqid, cmd, NULL, 0);
623 default:
624 return -EINVAL;
625 }
626 }
627
628 SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
629 {
630 return ksys_msgctl(msqid, cmd, buf, IPC_64);
631 }
632
633 #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
634 long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
635 {
636 int version = ipc_parse_version(&cmd);
637
638 return ksys_msgctl(msqid, cmd, buf, version);
639 }
640
641 SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
642 {
643 return ksys_old_msgctl(msqid, cmd, buf);
644 }
645 #endif
646
647 #ifdef CONFIG_COMPAT
648
649 struct compat_msqid_ds {
650 struct compat_ipc_perm msg_perm;
651 compat_uptr_t msg_first;
652 compat_uptr_t msg_last;
653 old_time32_t msg_stime;
654 old_time32_t msg_rtime;
655 old_time32_t msg_ctime;
656 compat_ulong_t msg_lcbytes;
657 compat_ulong_t msg_lqbytes;
658 unsigned short msg_cbytes;
659 unsigned short msg_qnum;
660 unsigned short msg_qbytes;
661 compat_ipc_pid_t msg_lspid;
662 compat_ipc_pid_t msg_lrpid;
663 };
664
665 static int copy_compat_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
666 {
667 memset(out, 0, sizeof(*out));
668 if (version == IPC_64) {
669 struct compat_msqid64_ds __user *p = buf;
670 if (get_compat_ipc64_perm(&out->msg_perm, &p->msg_perm)) {
671 return -EFAULT;
672 }
673 if (get_user(out->msg_qbytes, &p->msg_qbytes)) {
674 return -EFAULT;
675 }
676 } else {
677 struct compat_msqid_ds __user *p = buf;
678 if (get_compat_ipc_perm(&out->msg_perm, &p->msg_perm)) {
679 return -EFAULT;
680 }
681 if (get_user(out->msg_qbytes, &p->msg_qbytes)) {
682 return -EFAULT;
683 }
684 }
685 return 0;
686 }
687
688 static int copy_compat_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
689 {
690 if (version == IPC_64) {
691 struct compat_msqid64_ds v;
692 memset(&v, 0, sizeof(v));
693 to_compat_ipc64_perm(&v.msg_perm, &in->msg_perm);
694 v.msg_stime = lower_32_bits(in->msg_stime);
695 v.msg_stime_high = upper_32_bits(in->msg_stime);
696 v.msg_rtime = lower_32_bits(in->msg_rtime);
697 v.msg_rtime_high = upper_32_bits(in->msg_rtime);
698 v.msg_ctime = lower_32_bits(in->msg_ctime);
699 v.msg_ctime_high = upper_32_bits(in->msg_ctime);
700 v.msg_cbytes = in->msg_cbytes;
701 v.msg_qnum = in->msg_qnum;
702 v.msg_qbytes = in->msg_qbytes;
703 v.msg_lspid = in->msg_lspid;
704 v.msg_lrpid = in->msg_lrpid;
705 return copy_to_user(buf, &v, sizeof(v));
706 } else {
707 struct compat_msqid_ds v;
708 memset(&v, 0, sizeof(v));
709 to_compat_ipc_perm(&v.msg_perm, &in->msg_perm);
710 v.msg_stime = in->msg_stime;
711 v.msg_rtime = in->msg_rtime;
712 v.msg_ctime = in->msg_ctime;
713 v.msg_cbytes = in->msg_cbytes;
714 v.msg_qnum = in->msg_qnum;
715 v.msg_qbytes = in->msg_qbytes;
716 v.msg_lspid = in->msg_lspid;
717 v.msg_lrpid = in->msg_lrpid;
718 return copy_to_user(buf, &v, sizeof(v));
719 }
720 }
721
722 static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int version)
723 {
724 struct ipc_namespace *ns;
725 int err;
726 struct msqid64_ds msqid64;
727
728 ns = current->nsproxy->ipc_ns;
729
730 if (msqid < 0 || cmd < 0) {
731 return -EINVAL;
732 }
733
734 switch (cmd & (~IPC_64)) {
735 case IPC_INFO:
736 case MSG_INFO: {
737 struct msginfo msginfo;
738 err = msgctl_info(ns, msqid, cmd, &msginfo);
739 if (err < 0) {
740 return err;
741 }
742 if (copy_to_user(uptr, &msginfo, sizeof(struct msginfo))) {
743 err = -EFAULT;
744 }
745 return err;
746 }
747 case IPC_STAT:
748 case MSG_STAT:
749 case MSG_STAT_ANY:
750 err = msgctl_stat(ns, msqid, cmd, &msqid64);
751 if (err < 0) {
752 return err;
753 }
754 if (copy_compat_msqid_to_user(uptr, &msqid64, version)) {
755 err = -EFAULT;
756 }
757 return err;
758 case IPC_SET:
759 if (copy_compat_msqid_from_user(&msqid64, uptr, version)) {
760 return -EFAULT;
761 }
762 return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes);
763 case IPC_RMID:
764 return msgctl_down(ns, msqid, cmd, NULL, 0);
765 default:
766 return -EINVAL;
767 }
768 }
769
770 COMPAT_SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, void __user *, uptr)
771 {
772 return compat_ksys_msgctl(msqid, cmd, uptr, IPC_64);
773 }
774
775 #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
776 long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr)
777 {
778 int version = compat_ipc_parse_version(&cmd);
779
780 return compat_ksys_msgctl(msqid, cmd, uptr, version);
781 }
782
783 COMPAT_SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, void __user *, uptr)
784 {
785 return compat_ksys_old_msgctl(msqid, cmd, uptr);
786 }
787 #endif
788 #endif
789
790 static int testmsg(struct msg_msg *msg, long type, int mode)
791 {
792 switch (mode) {
793 case SEARCH_ANY:
794 case SEARCH_NUMBER:
795 return 1;
796 case SEARCH_LESSEQUAL:
797 if (msg->m_type <= type) {
798 return 1;
799 }
800 break;
801 case SEARCH_EQUAL:
802 if (msg->m_type == type) {
803 return 1;
804 }
805 break;
806 case SEARCH_NOTEQUAL:
807 if (msg->m_type != type) {
808 return 1;
809 }
810 break;
811 }
812 return 0;
813 }
814
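/*
 * Try to hand the new message directly to a sleeping receiver ("pipelined"
 * delivery), bypassing the queue. Returns 1 if a receiver took the message,
 * 0 if it still has to be queued. Receivers whose buffer is too small are
 * woken with -E2BIG instead.
 */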
815 static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, struct wake_q_head *wake_q)
816 {
817 struct msg_receiver *msr, *t;
818
819 list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
821 if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
822 !security_msg_queue_msgrcv(&msq->q_perm, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) {
823 list_del(&msr->r_list);
824 if (msr->r_maxsize < msg->m_ts) {
825 wake_q_add(wake_q, msr->r_tsk);
826
827 /* See expunge_all regarding memory barrier */
828 smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG));
829 } else {
830 ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk));
831 msq->q_rtime = ktime_get_real_seconds();
832
833 wake_q_add(wake_q, msr->r_tsk);
834
835 /* See expunge_all regarding memory barrier */
836 smp_store_release(&msr->r_msg, msg);
837 return 1;
838 }
839 }
840 }
841
842 return 0;
843 }
844
845 static long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg)
846 {
847 struct msg_queue *msq;
848 struct msg_msg *msg;
849 int err;
850 struct ipc_namespace *ns;
851 DEFINE_WAKE_Q(wake_q);
852
853 ns = current->nsproxy->ipc_ns;
854
855 if (msgsz > ns->msg_ctlmax || (long)msgsz < 0 || msqid < 0) {
856 return -EINVAL;
857 }
858 if (mtype < 1) {
859 return -EINVAL;
860 }
861
862 msg = load_msg(mtext, msgsz);
863 if (IS_ERR(msg)) {
864 return PTR_ERR(msg);
865 }
866
867 msg->m_type = mtype;
868 msg->m_ts = msgsz;
869
870 rcu_read_lock();
871 msq = msq_obtain_object_check(ns, msqid);
872 if (IS_ERR(msq)) {
873 err = PTR_ERR(msq);
874 goto out_unlock1;
875 }
876
877 ipc_lock_object(&msq->q_perm);
878
879 for (;;) {
880 struct msg_sender s;
881
882 err = -EACCES;
883 if (ipcperms(ns, &msq->q_perm, S_IWUGO)) {
884 goto out_unlock0;
885 }
886
887 /* raced with RMID? */
888 if (!ipc_valid_object(&msq->q_perm)) {
889 err = -EIDRM;
890 goto out_unlock0;
891 }
892
893 err = security_msg_queue_msgsnd(&msq->q_perm, msg, msgflg);
894 if (err) {
895 goto out_unlock0;
896 }
897
898 if (msg_fits_inqueue(msq, msgsz)) {
899 break;
900 }
901
902 /* queue full, wait: */
903 if (msgflg & IPC_NOWAIT) {
904 err = -EAGAIN;
905 goto out_unlock0;
906 }
907
908 /* enqueue the sender and prepare to block */
909 ss_add(msq, &s, msgsz);
910
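/*
 * Take a reference so the queue structure cannot be freed while we
 * sleep without the lock; validity is rechecked after waking up.
 */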
911 if (!ipc_rcu_getref(&msq->q_perm)) {
912 err = -EIDRM;
913 goto out_unlock0;
914 }
915
916 ipc_unlock_object(&msq->q_perm);
917 rcu_read_unlock();
918 schedule();
919
920 rcu_read_lock();
921 ipc_lock_object(&msq->q_perm);
922
923 ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
924 /* raced with RMID? */
925 if (!ipc_valid_object(&msq->q_perm)) {
926 err = -EIDRM;
927 goto out_unlock0;
928 }
929 ss_del(&s);
930
931 if (signal_pending(current)) {
932 err = -ERESTARTNOHAND;
933 goto out_unlock0;
934 }
935 }
936
937 ipc_update_pid(&msq->q_lspid, task_tgid(current));
938 msq->q_stime = ktime_get_real_seconds();
939
940 if (!pipelined_send(msq, msg, &wake_q)) {
941 /* no one is waiting for this message, enqueue it */
942 list_add_tail(&msg->m_list, &msq->q_messages);
943 msq->q_cbytes += msgsz;
944 msq->q_qnum++;
945 atomic_add(msgsz, &ns->msg_bytes);
946 atomic_inc(&ns->msg_hdrs);
947 }
948
949 err = 0;
950 msg = NULL;
951
952 out_unlock0:
953 ipc_unlock_object(&msq->q_perm);
954 wake_up_q(&wake_q);
955 out_unlock1:
956 rcu_read_unlock();
957 if (msg != NULL) {
958 free_msg(msg);
959 }
960 return err;
961 }
962
963 long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
964 {
965 long mtype;
966
967 if (get_user(mtype, &msgp->mtype)) {
968 return -EFAULT;
969 }
970 return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
971 }
972
973 SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, int, msgflg)
974 {
975 return ksys_msgsnd(msqid, msgp, msgsz, msgflg);
976 }
977
978 #ifdef CONFIG_COMPAT
979
980 struct compat_msgbuf {
981 compat_long_t mtype;
982 char mtext[1];
983 };
984
985 long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, int msgflg)
986 {
987 struct compat_msgbuf __user *up = compat_ptr(msgp);
988 compat_long_t mtype;
989
990 if (get_user(mtype, &up->mtype)) {
991 return -EFAULT;
992 }
993 return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg);
994 }
995
996 COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp, compat_ssize_t, msgsz, int, msgflg)
997 {
998 return compat_ksys_msgsnd(msqid, msgp, msgsz, msgflg);
999 }
1000 #endif
1001
1002 static inline int convert_mode(long *msgtyp, int msgflg)
1003 {
1004 if (msgflg & MSG_COPY) {
1005 return SEARCH_NUMBER;
1006 }
1007 /*
1008 * find message of correct type.
1009 * msgtyp = 0 => get first.
1010 * msgtyp > 0 => get first message of matching type.
1011 * msgtyp < 0 => get the message with the lowest type that is <= abs(msgtyp).
1012 */
1013 if (*msgtyp == 0) {
1014 return SEARCH_ANY;
1015 }
1016 if (*msgtyp < 0) {
1017 if (*msgtyp == LONG_MIN) { /* -LONG_MIN is undefined */
1018 *msgtyp = LONG_MAX;
1019 } else {
1020 *msgtyp = -*msgtyp;
1021 }
1022 return SEARCH_LESSEQUAL;
1023 }
1024 if (msgflg & MSG_EXCEPT) {
1025 return SEARCH_NOTEQUAL;
1026 }
1027 return SEARCH_EQUAL;
1028 }
1029
1030 static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
1031 {
1032 struct msgbuf __user *msgp = dest;
1033 size_t msgsz;
1034
1035 if (put_user(msg->m_type, &msgp->mtype)) {
1036 return -EFAULT;
1037 }
1038
1039 msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
1040 if (store_msg(msgp->mtext, msg, msgsz)) {
1041 return -EFAULT;
1042 }
1043 return msgsz;
1044 }
1045
1046 #ifdef CONFIG_CHECKPOINT_RESTORE
1047 /*
1048 * This function creates new kernel message structure, large enough to store
1049 * bufsz message bytes.
1050 */
1051 static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
1052 {
1053 struct msg_msg *copy;
1054
1055 /*
1056 * Create dummy message to copy real message to.
1057 */
1058 copy = load_msg(buf, bufsz);
1059 if (!IS_ERR(copy)) {
1060 copy->m_ts = bufsz;
1061 }
1062 return copy;
1063 }
1064
1065 static inline void free_copy(struct msg_msg *copy)
1066 {
1067 if (copy) {
1068 free_msg(copy);
1069 }
1070 }
1071 #else
1072 static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
1073 {
1074 return ERR_PTR(-ENOSYS);
1075 }
1076
1077 static inline void free_copy(struct msg_msg *copy)
1078 {
1079 }
1080 #endif
1081
1082 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
1083 {
1084 struct msg_msg *msg, *found = NULL;
1085 long count = 0;
1086
1087 list_for_each_entry(msg, &msq->q_messages, m_list) {
1089 if (testmsg(msg, *msgtyp, mode) && !security_msg_queue_msgrcv(&msq->q_perm, msg, current, *msgtyp, mode)) {
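/*
 * SEARCH_LESSEQUAL: remember the lowest-typed match seen so far and
 * keep narrowing *msgtyp; a message of type 1 cannot be beaten, so it
 * is returned immediately via the else branch.
 */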
1090 if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
1091 *msgtyp = msg->m_type - 1;
1092 found = msg;
1093 } else if (mode == SEARCH_NUMBER) {
1094 if (*msgtyp == count) {
1095 return msg;
1096 }
1097 } else {
1098 return msg;
1099 }
1100 count++;
1101 }
1102 }
1103
1104 return found ?: ERR_PTR(-EAGAIN);
1105 }
1106
1107 static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
1108 long (*msg_handler)(void __user *, struct msg_msg *, size_t))
1109 {
1110 int mode;
1111 struct msg_queue *msq;
1112 struct ipc_namespace *ns;
1113 struct msg_msg *msg, *copy = NULL;
1114 DEFINE_WAKE_Q(wake_q);
1115
1116 ns = current->nsproxy->ipc_ns;
1117
1118 if (msqid < 0 || (long)bufsz < 0) {
1119 return -EINVAL;
1120 }
1121
1122 if (msgflg & MSG_COPY) {
1123 if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) {
1124 return -EINVAL;
1125 }
1126 copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
1127 if (IS_ERR(copy)) {
1128 return PTR_ERR(copy);
1129 }
1130 }
1131 mode = convert_mode(&msgtyp, msgflg);
1132
1133 rcu_read_lock();
1134 msq = msq_obtain_object_check(ns, msqid);
1135 if (IS_ERR(msq)) {
1136 rcu_read_unlock();
1137 free_copy(copy);
1138 return PTR_ERR(msq);
1139 }
1140
1141 for (;;) {
1142 struct msg_receiver msr_d;
1143
1144 msg = ERR_PTR(-EACCES);
1145 if (ipcperms(ns, &msq->q_perm, S_IRUGO)) {
1146 goto out_unlock1;
1147 }
1148
1149 ipc_lock_object(&msq->q_perm);
1150
1151 /* raced with RMID? */
1152 if (!ipc_valid_object(&msq->q_perm)) {
1153 msg = ERR_PTR(-EIDRM);
1154 goto out_unlock0;
1155 }
1156
1157 msg = find_msg(msq, &msgtyp, mode);
1158 if (!IS_ERR(msg)) {
1159 /*
1160 * Found a suitable message.
1161 * Unlink it from the queue.
1162 */
1163 if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
1164 msg = ERR_PTR(-E2BIG);
1165 goto out_unlock0;
1166 }
1167 /*
1168 * If we are copying, then do not unlink message and do
1169 * not update queue parameters.
1170 */
1171 if (msgflg & MSG_COPY) {
1172 msg = copy_msg(msg, copy);
1173 goto out_unlock0;
1174 }
1175
1176 list_del(&msg->m_list);
1177 msq->q_qnum--;
1178 msq->q_rtime = ktime_get_real_seconds();
1179 ipc_update_pid(&msq->q_lrpid, task_tgid(current));
1180 msq->q_cbytes -= msg->m_ts;
1181 atomic_sub(msg->m_ts, &ns->msg_bytes);
1182 atomic_dec(&ns->msg_hdrs);
1183 ss_wakeup(msq, &wake_q, false);
1184
1185 goto out_unlock0;
1186 }
1187
1188 /* No message waiting. Wait for a message */
1189 if (msgflg & IPC_NOWAIT) {
1190 msg = ERR_PTR(-ENOMSG);
1191 goto out_unlock0;
1192 }
1193
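/*
 * Register as a waiting receiver; a later msgsnd() can then hand the
 * message straight to us through r_msg (see pipelined_send()).
 */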
1194 list_add_tail(&msr_d.r_list, &msq->q_receivers);
1195 msr_d.r_tsk = current;
1196 msr_d.r_msgtype = msgtyp;
1197 msr_d.r_mode = mode;
1198 if (msgflg & MSG_NOERROR) {
1199 msr_d.r_maxsize = INT_MAX;
1200 } else {
1201 msr_d.r_maxsize = bufsz;
1202 }
1203
1204 /* memory barrier not required due to ipc_lock_object() */
1205 WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN));
1206
1207 /* memory barrier not required, we own ipc_lock_object() */
1208 __set_current_state(TASK_INTERRUPTIBLE);
1209
1210 ipc_unlock_object(&msq->q_perm);
1211 rcu_read_unlock();
1212 schedule();
1213
1214 /*
1215 * Lockless receive, part 1:
1216 * We don't hold a reference to the queue and getting a
1217 * reference would defeat the idea of a lockless operation,
1218 * thus the code relies on rcu to guarantee the existence of
1219 * msq:
1220 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
1221 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
1222 */
1223 rcu_read_lock();
1224
1225 /*
1226 * Lockless receive, part 2:
1227 * The work in pipelined_send() and expunge_all():
1228 * - Set pointer to message
1229 * - Queue the receiver task for later wakeup
1230 * - Wake up the process after the lock is dropped.
1231 *
1232 * Should the process wake up before this wakeup (due to a
1233 * signal) it will either see the message and continue ...
1234 */
1235 msg = READ_ONCE(msr_d.r_msg);
1236 if (msg != ERR_PTR(-EAGAIN)) {
1237 /* see MSG_BARRIER for purpose/pairing */
1238 smp_acquire__after_ctrl_dep();
1239
1240 goto out_unlock1;
1241 }
1242
1243 /*
1244 * ... or see -EAGAIN, acquire the lock to check the message
1245 * again.
1246 */
1247 ipc_lock_object(&msq->q_perm);
1248
1249 msg = READ_ONCE(msr_d.r_msg);
1250 if (msg != ERR_PTR(-EAGAIN)) {
1251 goto out_unlock0;
1252 }
1253
1254 list_del(&msr_d.r_list);
1255 if (signal_pending(current)) {
1256 msg = ERR_PTR(-ERESTARTNOHAND);
1257 goto out_unlock0;
1258 }
1259
1260 ipc_unlock_object(&msq->q_perm);
1261 }
1262
1263 out_unlock0:
1264 ipc_unlock_object(&msq->q_perm);
1265 wake_up_q(&wake_q);
1266 out_unlock1:
1267 rcu_read_unlock();
1268 if (IS_ERR(msg)) {
1269 free_copy(copy);
1270 return PTR_ERR(msg);
1271 }
1272
1273 bufsz = msg_handler(buf, msg, bufsz);
1274 free_msg(msg);
1275
1276 return bufsz;
1277 }
1278
1279 long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, long msgtyp, int msgflg)
1280 {
1281 return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
1282 }
1283
1284 SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg)
1285 {
1286 return ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg);
1287 }
1288
1289 #ifdef CONFIG_COMPAT
1290 static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
1291 {
1292 struct compat_msgbuf __user *msgp = dest;
1293 size_t msgsz;
1294
1295 if (put_user(msg->m_type, &msgp->mtype)) {
1296 return -EFAULT;
1297 }
1298
1299 msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
1300 if (store_msg(msgp->mtext, msg, msgsz)) {
1301 return -EFAULT;
1302 }
1303 return msgsz;
1304 }
1305
1306 long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg)
1307 {
1308 return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp, msgflg, compat_do_msg_fill);
1309 }
1310
1311 COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, compat_ssize_t, msgsz, compat_long_t, msgtyp, int,
1312 msgflg)
1313 {
1314 return compat_ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg);
1315 }
1316 #endif
1317
1318 void msg_init_ns(struct ipc_namespace *ns)
1319 {
1320 ns->msg_ctlmax = MSGMAX;
1321 ns->msg_ctlmnb = MSGMNB;
1322 ns->msg_ctlmni = MSGMNI;
1323
1324 atomic_set(&ns->msg_bytes, 0);
1325 atomic_set(&ns->msg_hdrs, 0);
1326 ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
1327 }
1328
1329 #ifdef CONFIG_IPC_NS
1330 void msg_exit_ns(struct ipc_namespace *ns)
1331 {
1332 free_ipcs(ns, &msg_ids(ns), freeque);
1333 idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
1334 rhashtable_destroy(&ns->ids[IPC_MSG_IDS].key_ht);
1335 }
1336 #endif
1337
1338 #ifdef CONFIG_PROC_FS
1339 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
1340 {
1341 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1342 struct user_namespace *user_ns = seq_user_ns(s);
1343 struct kern_ipc_perm *ipcp = it;
1344 struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
1345
1346 seq_printf(s, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10llu %10llu %10llu\n", msq->q_perm.key,
1347 msq->q_perm.id, msq->q_perm.mode, msq->q_cbytes, msq->q_qnum, pid_nr_ns(msq->q_lspid, pid_ns),
1348 pid_nr_ns(msq->q_lrpid, pid_ns), from_kuid_munged(user_ns, msq->q_perm.uid),
1349 from_kgid_munged(user_ns, msq->q_perm.gid), from_kuid_munged(user_ns, msq->q_perm.cuid),
1350 from_kgid_munged(user_ns, msq->q_perm.cgid), msq->q_stime, msq->q_rtime, msq->q_ctime);
1351
1352 return 0;
1353 }
1354 #endif
1355
1356 void __init msg_init(void)
1357 {
1358 msg_init_ns(&init_ipc_ns);
1359
1360 ipc_init_proc_interface("sysvipc/msg",
1361 " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid "
1362 " stime rtime ctime\n",
1363 IPC_MSG_IDS, sysvipc_msg_proc_show);
1364 }
1365