/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

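/*
 * Receive-search modes, derived from msgtyp/msgflg in convert_mode():
 * SEARCH_ANY takes the first message, SEARCH_EQUAL/SEARCH_NOTEQUAL match
 * on the message type, SEARCH_LESSEQUAL takes the lowest type that is
 * <= the requested one, and SEARCH_NUMBER (MSG_COPY) selects by index.
 */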
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the [MSGMNI, IPCMNI/nr_ipc_ns] range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}

void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

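/*
 * RCU callback: runs after a grace period, so no lockless receiver can
 * still be looking at the queue; free the security blob, then the queue.
 */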
static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	security_msg_queue_free(msq);
	ipc_rcu_free(head);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq, ipc_rcu_free);
		return retval;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		ipc_rcu_putref(msq, msg_rcu_free);
		return id;
	}

	msg_unlock(msq);

	return msq->q_perm.id;
}

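/*
 * Enqueue the current task on the queue's list of sleeping senders and
 * mark it TASK_INTERRUPTIBLE; called with the queue lock held.
 */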
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

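/*
 * Wake all sleeping senders. With @kill set (queue removal), list.next
 * is cleared so that the woken sender's later ss_del() becomes a no-op
 * instead of unlinking an already-removed entry.
 */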
static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

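/*
 * Wake every sleeping receiver and report @res as the receive result:
 * r_msg is first set to NULL, which makes the woken task busy-wait (see
 * the lockless receive path in do_msgrcv()); after the barrier the final
 * error code is published.
 */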
static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(msq, msg_rcu_free);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

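/*
 * Translate the kernel msqid64_ds into the ABI layout userspace asked
 * for: IPC_64 is a straight copy, while the legacy IPC_OLD structure
 * only has 16-bit counters, so larger values are clamped to USHRT_MAX
 * (the full values still go out via the msg_l* fields).
 */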
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
			       &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data: due to padding,
		 * it is not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}

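/*
 * Return 1 if @msg satisfies the receive criteria, 0 otherwise.
 */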
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

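/*
 * Try to hand the message directly to a sleeping receiver, bypassing
 * the queue (wake-one semantics). Returns 1 if the message was consumed;
 * a matching receiver whose buffer is too small is woken with -E2BIG and
 * the scan continues.
 */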
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}

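/*
 * Implementation of msgsnd(2): the caller has already fetched mtype from
 * the user buffer; mtext still points at user memory and is copied by
 * load_msg(). Sleeps (unless IPC_NOWAIT) until the message fits the
 * queue's byte and message-count limits.
 */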
long do_msgsnd(int msqid, long mtype, void __user *mtext,
	       size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
		    1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);

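		/*
		 * Take a reference so the queue structure cannot be freed
		 * while we sleep without the lock; q_perm.deleted is
		 * rechecked after we wake up and relock.
		 */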
		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock_free;
		}

		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq, ipc_rcu_free);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with lowest type <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

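/*
 * Copy a received message out to the user buffer, truncating the text
 * to bufsz if necessary; returns the number of bytes stored or -EFAULT.
 */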
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to
 * store bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif

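/*
 * Scan the queue for the first message matching *msgtyp/mode. For
 * SEARCH_LESSEQUAL, *msgtyp is narrowed to the smallest type seen so
 * far; for SEARCH_NUMBER (MSG_COPY), *msgtyp is the index of the
 * matching message to return. Returns ERR_PTR(-EAGAIN) if none match.
 */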
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return ERR_PTR(-EAGAIN);
}


long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
	       int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;
	struct msg_msg *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;
	if (msgflg & MSG_COPY) {
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = find_msg(msq, &msgtyp, mode);

		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}
		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			from_kuid_munged(user_ns, msq->q_perm.uid),
			from_kgid_munged(user_ns, msq->q_perm.gid),
			from_kuid_munged(user_ns, msq->q_perm.cuid),
			from_kgid_munged(user_ns, msq->q_perm.cgid),
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif