/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_PENDING   1
#define STATE_READY     2
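
/*
 * SEND and RECV index the two e_wait_q[] heads in struct mqueue_inode_info.
 * A sleeping waiter's state moves STATE_NONE -> STATE_PENDING -> STATE_READY:
 * the waking side marks the entry STATE_PENDING around the wakeup and only
 * sets STATE_READY once the message handover is complete, so a woken task
 * spins in wq_sleep() until the transfer has finished.
 */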

/* default values */
#define DFLT_QUEUESMAX  256     /* max number of message queues */
#define DFLT_MSGMAX     10      /* max number of messages in each queue */
#define HARD_MSGMAX     (131072/sizeof(void*))
#define DFLT_MSGSIZEMAX 8192    /* max message size */
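
/*
 * HARD_MSGMAX caps mq_maxmsg so that the per-queue pointer table allocated
 * in mqueue_get_inode() (mq_maxmsg * sizeof(struct msg_msg *)) never
 * exceeds 128 KiB, keeping it a reasonable kmalloc() size.
 */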

/*
 * Define the ranges various user-specified maximum values can
 * be set to.
 */
#define MIN_MSGMAX      1               /* min value for msg_max */
#define MAX_MSGMAX      HARD_MSGMAX     /* max value for msg_max */
#define MIN_MSGSIZEMAX  128             /* min value for msgsize_max */
#define MAX_MSGSIZEMAX  (8192*128)      /* max value for msgsize_max */

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct msg_msg **messages;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max  = DFLT_QUEUESMAX;
static unsigned int msg_max     = DFLT_MSGMAX;
static unsigned int msgsize_max = DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
                                      struct mq_attr *attr)
{
        struct user_struct *u = current_user();
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = current_fsgid();
                inode->i_mtime = inode->i_ctime = inode->i_atime =
                                CURRENT_TIME;

                if (S_ISREG(mode)) {
                        struct mqueue_inode_info *info;
                        struct task_struct *p = current;
                        unsigned long mq_bytes, mq_msg_tblsz;

                        inode->i_fop = &mqueue_file_operations;
                        inode->i_size = FILENT_SIZE;
                        /* mqueue specific info */
                        info = MQUEUE_I(inode);
                        spin_lock_init(&info->lock);
                        init_waitqueue_head(&info->wait_q);
                        INIT_LIST_HEAD(&info->e_wait_q[0].list);
                        INIT_LIST_HEAD(&info->e_wait_q[1].list);
                        info->messages = NULL;
                        info->notify_owner = NULL;
                        info->qsize = 0;
                        info->user = NULL;      /* set when all is ok */
                        memset(&info->attr, 0, sizeof(info->attr));
                        info->attr.mq_maxmsg = msg_max;
                        info->attr.mq_msgsize = msgsize_max;
                        if (attr) {
                                info->attr.mq_maxmsg = attr->mq_maxmsg;
                                info->attr.mq_msgsize = attr->mq_msgsize;
                        }
                        mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
                        mq_bytes = (mq_msg_tblsz +
                                (info->attr.mq_maxmsg * info->attr.mq_msgsize));

                        spin_lock(&mq_lock);
                        if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                            u->mq_bytes + mq_bytes >
                            p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
                                spin_unlock(&mq_lock);
                                goto out_inode;
                        }
                        u->mq_bytes += mq_bytes;
                        spin_unlock(&mq_lock);

                        info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
                        if (!info->messages) {
                                spin_lock(&mq_lock);
                                u->mq_bytes -= mq_bytes;
                                spin_unlock(&mq_lock);
                                goto out_inode;
                        }
                        /* all is ok */
                        info->user = get_uid(u);
                } else if (S_ISDIR(mode)) {
                        inc_nlink(inode);
                        /* Some things misbehave if size == 0 on a directory */
                        inode->i_size = 2 * DIRENT_SIZE;
                        inode->i_op = &mqueue_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;
                }
        }
        return inode;
out_inode:
        make_bad_inode(inode);
        iput(inode);
        return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;

        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (!inode)
                return -ENOMEM;

        sb->s_root = d_alloc_root(inode);
        if (!sb->s_root) {
                iput(inode);
                return -ENOMEM;
        }

        return 0;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
                         int flags, const char *dev_name,
                         void *data, struct vfsmount *mnt)
{
        return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *)foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct user_struct *user;
        unsigned long mq_bytes;
        int i;

        if (S_ISDIR(inode->i_mode)) {
                clear_inode(inode);
                return;
        }
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        for (i = 0; i < info->attr.mq_curmsgs; i++)
                free_msg(info->messages[i]);
        kfree(info->messages);
        spin_unlock(&info->lock);

        clear_inode(inode);

        mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
                   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                queues_count--;
                spin_unlock(&mq_lock);
                free_uid(user);
        }
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
                         int mode, struct nameidata *nd)
{
        struct inode *inode;
        struct mq_attr *attr = dentry->d_fsdata;
        int error;

        spin_lock(&mq_lock);
        if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_lock;
        }
        queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, mode, attr);
        if (!inode) {
                error = -ENOMEM;
                spin_lock(&mq_lock);
                queues_count--;
                goto out_lock;
        }

        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_lock:
        spin_unlock(&mq_lock);
        return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This routine services read() on a queue file.  Rather than performing
 * some sort of mq_receive() here, it exposes only the queue size and the
 * notification info, the only values that are interesting from the user's
 * point of view and aren't accessible through the standard calls.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                 info->qsize,
                 info->notify_owner ? info->notify.sigev_notify : 0,
                 (info->notify_owner &&
                  info->notify.sigev_notify == SIGEV_SIGNAL) ?
                        info->notify.sigev_signo : 0,
                 pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                      strlen(buffer));
        if (ret <= 0)
                return ret;

        filp->f_path.dentry->d_inode->i_atime =
                filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
        return ret;
}
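
/*
 * A read therefore yields a single line in the format above; for
 * illustration (values made up), something like:
 *
 *      QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */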

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
        int retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = POLLIN | POLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= POLLOUT | POLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}

/* Adds current to info->e_wait_q[sr] before the first element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                   struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        ewp->task = current;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->static_prio <= current->static_prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep.  The caller must hold the queue lock;
 * it is no longer held on return.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    long timeout, struct ext_wait_queue *ewp)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_timeout(timeout);

                while (ewp->state == STATE_PENDING)
                        cpu_relax();

                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
        int k;

        k = info->attr.mq_curmsgs - 1;
        while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
                info->messages[k + 1] = info->messages[k];
                k--;
        }
        info->attr.mq_curmsgs++;
        info->qsize += ptr->m_ts;
        info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
        return info->messages[info->attr.mq_curmsgs];
}
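
/*
 * The messages[] array is kept sorted by type (priority) in ascending
 * order, so the highest-priority message always sits at index
 * mq_curmsgs - 1: msg_insert() is O(n) but msg_get() is O(1).  Because
 * msg_insert() shifts equal-priority entries above the new one, messages
 * of the same priority are received in FIFO order, as POSIX requires.
 */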

static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* notification
         * is invoked when a process is registered, no process is waiting
         * synchronously for a message AND the queue has just changed from
         * empty to non-empty.  At this point we know that no one is
         * waiting synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                struct siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL:
                        /* sends signal */

                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = current_uid();

                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
                        break;
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                info->notify_owner = NULL;
        }
        wake_up(&info->wait_q);
}
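
/*
 * prepare_timeout() converts the caller-supplied absolute timespec into
 * a relative jiffies count for schedule_timeout().  It returns -EINVAL
 * for a malformed timespec, 0 when the deadline has already passed (so
 * a blocking caller fails promptly with -ETIMEDOUT in wq_sleep()), and
 * MAX_SCHEDULE_TIMEOUT when no timeout was supplied.
 */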
static long prepare_timeout(struct timespec *p)
{
        struct timespec nowts;
        long timeout;

        if (p) {
                if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
                        || p->tv_nsec >= NSEC_PER_SEC))
                        return -EINVAL;
                nowts = CURRENT_TIME;
                /* first subtract as jiffies can't be too big */
                p->tv_sec -= nowts.tv_sec;
                if (p->tv_nsec < nowts.tv_nsec) {
                        p->tv_nsec += NSEC_PER_SEC;
                        p->tv_sec--;
                }
                p->tv_nsec -= nowts.tv_nsec;
                if (p->tv_sec < 0)
                        return 0;

                timeout = timespec_to_jiffies(p) + 1;
        } else
                return MAX_SCHEDULE_TIMEOUT;

        return timeout;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        info->notify_owner = NULL;
}

static int mq_attr_ok(struct mq_attr *attr)
{
        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
                return 0;
        if (capable(CAP_SYS_RESOURCE)) {
                if (attr->mq_maxmsg > HARD_MSGMAX)
                        return 0;
        } else {
                if (attr->mq_maxmsg > msg_max ||
                    attr->mq_msgsize > msgsize_max)
                        return 0;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
                return 0;
        if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
            (attr->mq_maxmsg * sizeof(struct msg_msg *)) <
            (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
                return 0;
        return 1;
}
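
/*
 * For example, with the default attributes mq_maxmsg = 10 and
 * mq_msgsize = 8192, mq_attr_ok() accepts the pair, and the queue is
 * later charged 10 * sizeof(struct msg_msg *) + 10 * 8192 bytes against
 * the creator's RLIMIT_MSGQUEUE in mqueue_get_inode().
 */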

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
                              int oflag, mode_t mode, struct mq_attr *attr)
{
        const struct cred *cred = current_cred();
        struct file *result;
        int ret;

        if (attr) {
                ret = -EINVAL;
                if (!mq_attr_ok(attr))
                        goto out;
                /* store for use during create */
                dentry->d_fsdata = attr;
        }

        mode &= ~current->fs->umask;
        ret = mnt_want_write(mqueue_mnt);
        if (ret)
                goto out;
        ret = vfs_create(dir->d_inode, dentry, mode, NULL);
        dentry->d_fsdata = NULL;
        if (ret)
                goto out_drop_write;

        result = dentry_open(dentry, mqueue_mnt, oflag, cred);
        /*
         * dentry_open() took a persistent mnt_want_write(),
         * so we can now drop this one.
         */
        mnt_drop_write(mqueue_mnt);
        return result;

out_drop_write:
        mnt_drop_write(mqueue_mnt);
out:
        dput(dentry);
        mntput(mqueue_mnt);
        return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
        const struct cred *cred = current_cred();

        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };

        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
                dput(dentry);
                mntput(mqueue_mnt);
                return ERR_PTR(-EINVAL);
        }

        if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
                dput(dentry);
                mntput(mqueue_mnt);
                return ERR_PTR(-EACCES);
        }

        return dentry_open(dentry, mqueue_mnt, oflag, cred);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct dentry *dentry;
        struct file *filp;
        char *name;
        struct mq_attr attr;
        int fd, error;

        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
        dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                error = PTR_ERR(dentry);
                goto out_err;
        }
        mntget(mqueue_mnt);

        if (oflag & O_CREAT) {
                if (dentry->d_inode) {  /* entry already exists */
                        audit_inode(name, dentry);
                        error = -EEXIST;
                        if (oflag & O_EXCL)
                                goto out;
                        filp = do_open(dentry, oflag);
                } else {
                        filp = do_create(mqueue_mnt->mnt_root, dentry,
                                         oflag, mode,
                                         u_attr ? &attr : NULL);
                }
        } else {
                error = -ENOENT;
                if (!dentry->d_inode)
                        goto out;
                audit_inode(name, dentry);
                filp = do_open(dentry, oflag);
        }

        if (IS_ERR(filp)) {
                error = PTR_ERR(filp);
                goto out_putfd;
        }

        fd_install(fd, filp);
        goto out_upsem;

out:
        dput(dentry);
        mntput(mqueue_mnt);
out_putfd:
        put_unused_fd(fd);
out_err:
        fd = error;
out_upsem:
        mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
        putname(name);
        return fd;
}
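
/*
 * Typical userspace usage, as a rough sketch (the calls below go through
 * the librt wrappers, which add glue of their own):
 *
 *      mqd_t mq = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, NULL);
 *      mq_send(mq, buf, len, prio);            // sys_mq_timedsend()
 *      mq_receive(mq, buf, bufsz, &prio);      // sys_mq_timedreceive()
 *      mq_close(mq);                           // plain close() on the fd
 *
 * Queue names are looked up as dentries directly under the root of the
 * internal mqueue mount.
 */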

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        char *name;
        struct dentry *dentry;
        struct inode *inode = NULL;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
                          I_MUTEX_PARENT);
        dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        if (!dentry->d_inode) {
                err = -ENOENT;
                goto out_err;
        }

        inode = dentry->d_inode;
        if (inode)
                atomic_inc(&inode->i_count);
        err = mnt_want_write(mqueue_mnt);
        if (err)
                goto out_err;
        err = vfs_unlink(dentry->d_parent->d_inode, dentry);
        mnt_drop_write(mqueue_mnt);
out_err:
        dput(dentry);

out_unlock:
        mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
        putname(name);
        if (inode)
                iput(inode);

        return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same scheme works in the other direction as well: a blocked sender
 * gets its message taken over directly by a receiver.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        list_del(&receiver->list);
        receiver->state = STATE_PENDING;
        wake_up_process(receiver->task);
        smp_wmb();
        receiver->state = STATE_READY;
}
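
/*
 * Note the ordering above: receiver->msg is stored before the entry is
 * marked STATE_PENDING, and the smp_wmb() is meant to make the whole
 * handover visible before the final STATE_READY store, since the woken
 * task spins on the state field in wq_sleep() rather than taking the
 * queue spinlock.
 */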

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and put it in the queue (there is guaranteed to be a free
 * slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        msg_insert(sender->msg, info);
        list_del(&sender->list);
        sender->state = STATE_PENDING;
        wake_up_process(sender->task);
        smp_wmb();
        sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        struct file *filp;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        struct timespec ts, *p = NULL;
        long timeout;
        int ret;

        if (u_abs_timeout) {
                if (copy_from_user(&ts, u_abs_timeout,
                                   sizeof(struct timespec)))
                        return -EFAULT;
                p = &ts;
        }

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
        timeout = prepare_timeout(p);

        ret = -EBADF;
        filp = fget(mqdes);
        if (unlikely(!filp))
                goto out;

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations))
                goto out_fput;
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_WRITE)))
                goto out_fput;

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        spin_lock(&info->lock);

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else if (unlikely(timeout < 0)) {
                        spin_unlock(&info->lock);
                        ret = timeout;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
                }
                if (ret < 0)
                        free_msg(msg_ptr);
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        msg_insert(msg_ptr, info);
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
                spin_unlock(&info->lock);
                ret = 0;
        }
out_fput:
        fput(filp);
out:
        return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        long timeout;
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        struct timespec ts, *p = NULL;

        if (u_abs_timeout) {
                if (copy_from_user(&ts, u_abs_timeout,
                                   sizeof(struct timespec)))
                        return -EFAULT;
                p = &ts;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, p);
        timeout = prepare_timeout(p);

        ret = -EBADF;
        filp = fget(mqdes);
        if (unlikely(!filp))
                goto out;

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations))
                goto out_fput;
        info = MQUEUE_I(inode);
        audit_inode(NULL, filp->f_path.dentry);

        if (unlikely(!(filp->f_mode & FMODE_READ)))
                goto out_fput;

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs == 0) {
                if (filp->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                        msg_ptr = NULL;
                } else if (unlikely(timeout < 0)) {
                        spin_unlock(&info->lock);
                        ret = timeout;
                        msg_ptr = NULL;
                } else {
                        wait.task = current;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;

                /* There is now free space in queue. */
                pipelined_receive(info);
                spin_unlock(&info->lock);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                    store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fput(filp);
out:
        return ret;
}

/*
 * Note: if a caller asks to deregister (passing a NULL notification
 * pointer) but is not the current owner of the notification, the request
 * is silently ignored; POSIX does not explicitly specify this case.
 *
 * For SIGEV_THREAD the kernel ABI differs from the POSIX API: userspace
 * passes a netlink socket fd in sigev_signo and a NOTIFY_COOKIE_LEN
 * cookie through sigev_value.sival_ptr; the cookie is sent back on that
 * socket to wake the notification thread (see __do_notify()).
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        int ret;
        struct file *filp;
        struct sock *sock;
        struct inode *inode;
        struct sigevent notification;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        if (u_notification) {
                if (copy_from_user(&notification, u_notification,
                                   sizeof(struct sigevent)))
                        return -EFAULT;
        }

        audit_mq_notify(mqdes, u_notification ? &notification : NULL);

        nc = NULL;
        sock = NULL;
        if (u_notification != NULL) {
                if (unlikely(notification.sigev_notify != SIGEV_NONE &&
                             notification.sigev_notify != SIGEV_SIGNAL &&
                             notification.sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification.sigev_notify == SIGEV_SIGNAL &&
                    !valid_signal(notification.sigev_signo)) {
                        return -EINVAL;
                }
                if (notification.sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        ret = -ENOMEM;
                        if (!nc)
                                goto out;
                        ret = -EFAULT;
                        if (copy_from_user(nc->data,
                                           notification.sigev_value.sival_ptr,
                                           NOTIFY_COOKIE_LEN)) {
                                goto out;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        filp = fget(notification.sigev_signo);
                        ret = -EBADF;
                        if (!filp)
                                goto out;
                        sock = netlink_getsockbyfilp(filp);
                        fput(filp);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
                                goto out;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
                                goto retry;
                        if (ret) {
                                sock = NULL;
                                nc = NULL;
                                goto out;
                        }
                }
        }

        ret = -EBADF;
        filp = fget(mqdes);
        if (!filp)
                goto out;

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations))
                goto out_fput;
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (u_notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = CURRENT_TIME;
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification.sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification.sigev_signo;
                        info->notify.sigev_value = notification.sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }
        spin_unlock(&info->lock);
out_fput:
        fput(filp);
out:
        if (sock) {
                netlink_detachskb(sock, nc);
        } else if (nc) {
                dev_kfree_skb(nc);
        }
        return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct file *filp;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (u_mqstat != NULL) {
                if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
                if (mqstat.mq_flags & (~O_NONBLOCK))
                        return -EINVAL;
        }

        ret = -EBADF;
        filp = fget(mqdes);
        if (!filp)
                goto out;

        inode = filp->f_path.dentry->d_inode;
        if (unlikely(filp->f_op != &mqueue_file_operations))
                goto out_fput;
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        omqstat = info->attr;
        omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
        if (u_mqstat) {
                audit_mq_getsetattr(mqdes, &mqstat);
                if (mqstat.mq_flags & O_NONBLOCK)
                        filp->f_flags |= O_NONBLOCK;
                else
                        filp->f_flags &= ~O_NONBLOCK;

                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }

        spin_unlock(&info->lock);

        ret = 0;
        if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
                                              sizeof(struct mq_attr)))
                ret = -EFAULT;

out_fput:
        fput(filp);
out:
        return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .destroy_inode = mqueue_destroy_inode,
        .statfs = simple_statfs,
        .delete_inode = mqueue_delete_inode,
        .drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .get_sb = mqueue_get_sb,
        .kill_sb = kill_litter_super,
};

static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = MAX_MSGMAX;

static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
static int msg_maxsize_limit_max = MAX_MSGSIZEMAX;

static ctl_table mq_sysctls[] = {
        {
                .procname       = "queues_max",
                .data           = &queues_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .procname       = "msg_max",
                .data           = &msg_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
                .extra1         = &msg_max_limit_min,
                .extra2         = &msg_max_limit_max,
        },
        {
                .procname       = "msgsize_max",
                .data           = &msgsize_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
                .extra1         = &msg_maxsize_limit_min,
                .extra2         = &msg_maxsize_limit_max,
        },
        { .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
        {
                .procname       = "mqueue",
                .mode           = 0555,
                .child          = mq_sysctls,
        },
        { .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
        {
                .ctl_name       = CTL_FS,
                .procname       = "fs",
                .mode           = 0555,
                .child          = mq_sysctl_dir,
        },
        { .ctl_name = 0 }
};
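
/*
 * The tables above expose the limits as /proc/sys/fs/mqueue/queues_max,
 * /proc/sys/fs/mqueue/msg_max and /proc/sys/fs/mqueue/msgsize_max.
 */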

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
                error = PTR_ERR(mqueue_mnt);
                goto out_filesystem;
        }

        /* internal initialization - not common for vfs */
        queues_count = 0;
        spin_lock_init(&mq_lock);

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

__initcall(init_mqueue_fs);