/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *			    Manfred Spraul	    (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right.  On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
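
/*
 * A minimal userspace sketch (not part of this file; it assumes the glibc
 * wrappers from <mqueue.h>, linked with -lrt) of the ordering that
 * msg_insert()/msg_get() implement: mq_receive() returns the
 * highest-priority message first, and equal-priority messages in FIFO
 * order. The queue name "/prio-demo" and buffer size (the default
 * mq_msgsize of 8192) are illustrative.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[8192];
 *		unsigned int prio;
 *		mqd_t q = mq_open("/prio-demo", O_CREAT | O_RDWR, 0600, NULL);
 *
 *		mq_send(q, "low", 3, 1);
 *		mq_send(q, "high", 4, 5);
 *		mq_send(q, "mid", 3, 3);
 *
 *		for (int i = 0; i < 3; i++) {
 *			ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *			printf("prio %u: %.*s\n", prio, (int)n, buf);
 *		}
 *
 *		mq_close(q);
 *		mq_unlink("/prio-demo");
 *		return 0;
 *	}
 *
 * This prints "high" (prio 5), then "mid" (3), then "low" (1).
 */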

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
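		/*
		 * Worked example of the accounting above, with illustrative
		 * sizes (assume sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 40 on a 64-bit
		 * build). For mq_maxmsg = 10 and mq_msgsize = 8192:
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *	            = 480 + 400 = 880 bytes
		 *	mq_bytes    = 880 + 10 * 8192 = 82800 bytes
		 *
		 * and those 82800 bytes are what gets charged against the
		 * creating user's RLIMIT_MSGQUEUE below.
		 */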
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT)) {
		struct ipc_namespace *ns = current->nsproxy->ipc_ns;
		/* Don't allow mounting unless the caller has CAP_SYS_ADMIN
		 * over the ipc namespace.
		 */
		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
			return ERR_PTR(-EPERM);

		data = ns;
	}
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles reads from a queue file.
 * Rather than implementing some form of mq_receive here, we only allow
 * reading the queue size and notification info (the only values that are
 * interesting from the user's point of view and aren't accessible through
 * the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
	return ret;
}
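
/*
 * A small userspace sketch (not part of this file) of the read path above:
 * with mqueuefs mounted, conventionally at /dev/mqueue, each queue appears
 * as a file whose contents are the status line built by mqueue_read_file().
 * This assumes a queue named "/demo" already exists.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/dev/mqueue/demo", "r");
 *
 *		if (f && fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 *
 * which prints something like:
 *
 *	QSIZE:6          NOTIFY:0     SIGNO:0     NOTIFY_PID:0
 */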

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * after return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function exists only to split up the overly long
 * sys_mq_timedsend().
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process is registered, no process
	 * is waiting synchronously for a message, AND the state of the queue
	 * has changed from empty to not empty. Here we are sure that no one
	 * is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification, the process is unregistered */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}
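
/*
 * A minimal userspace sketch (glibc wrappers, -lrt assumed) of the checks
 * in mq_attr_ok(): for an unprivileged caller, mq_open() with attributes
 * above the per-namespace limits (fs.mqueue.msg_max / fs.mqueue.msgsize_max)
 * fails with EINVAL, while values within the limits succeed. The name
 * "/attr-demo" is illustrative.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 1024 };
 *		mqd_t q = mq_open("/attr-demo", O_CREAT | O_RDWR, 0600, &attr);
 *
 *		if (q == (mqd_t)-1) {
 *			perror("mq_open");
 *			return 1;
 *		}
 *		mq_close(q);
 *		mq_unlink("/attr-demo");
 *		return 0;
 *	}
 */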

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create2(path->mnt, dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission2(path->mnt, d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	mutex_lock(&d_inode(root)->i_mutex);
	path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root),
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&d_inode(root)->i_mutex);
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}
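
/*
 * A userspace sketch of the O_CREAT | O_EXCL handling above: the second
 * exclusive create of the same name takes the d_really_is_positive()/O_EXCL
 * branch and fails with EEXIST. "/excl-demo" is an example name; glibc
 * wrappers and -lrt are assumed.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <errno.h>
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		mqd_t q1 = mq_open("/excl-demo", O_CREAT | O_EXCL | O_RDWR,
 *				   0600, NULL);
 *		mqd_t q2 = mq_open("/excl-demo", O_CREAT | O_EXCL | O_RDWR,
 *				   0600, NULL);
 *
 *		assert(q1 != (mqd_t)-1);
 *		assert(q2 == (mqd_t)-1 && errno == EEXIST);
 *
 *		mq_close(q1);
 *		mq_unlink("/excl-demo");
 *		return 0;
 *	}
 */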

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink2(mnt, d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	mutex_unlock(&d_inode(mnt->mnt_root)->i_mutex);
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed
 * to be a free slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	WAKE_Q(wake_q);

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
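
/*
 * A userspace sketch (glibc wrappers assumed) of the timeout path above: as
 * wq_sleep() shows, the deadline is an *absolute* time on CLOCK_REALTIME,
 * so callers take the current time and add a delta. Sending to a full queue
 * opened without O_NONBLOCK then fails with ETIMEDOUT once the deadline
 * passes. send_with_timeout() is a hypothetical helper name.
 *
 *	#include <mqueue.h>
 *	#include <time.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	static int send_with_timeout(mqd_t q, const char *msg, size_t len)
 *	{
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_REALTIME, &ts);
 *		ts.tv_sec += 2;
 *
 *		if (mq_timedsend(q, msg, len, 0, &ts) == -1) {
 *			if (errno == ETIMEDOUT)
 *				fprintf(stderr, "queue full for 2s\n");
 *			return -1;
 *		}
 *		return 0;
 *	}
 *
 * The "ts.tv_sec += 2" turns the current CLOCK_REALTIME time into an
 * absolute deadline two seconds away.
 */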

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
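
/*
 * A userspace sketch (glibc wrappers assumed) of the -EMSGSIZE check above:
 * the receive buffer must be at least mq_msgsize bytes even for short
 * messages, so portable code queries the size with mq_getattr() first.
 * recv_any() is a hypothetical helper name.
 *
 *	#include <mqueue.h>
 *	#include <stdlib.h>
 *
 *	static ssize_t recv_any(mqd_t q, unsigned int *prio)
 *	{
 *		struct mq_attr attr;
 *		char *buf;
 *		ssize_t n;
 *
 *		if (mq_getattr(q, &attr) == -1)
 *			return -1;
 *		buf = malloc(attr.mq_msgsize);
 *		if (!buf)
 *			return -1;
 *		n = mq_receive(q, buf, attr.mq_msgsize, prio);
 *		free(buf);
 *		return n;
 *	}
 */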

/*
 * Notes: if the caller asks to deregister (passing a NULL pointer) but is
 * not the current owner of the notification, the request is silently
 * ignored. POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}
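
/*
 * A minimal userspace sketch (glibc wrappers, -lrt assumed) of SIGEV_SIGNAL
 * registration through the syscall above. Registration is one-shot: the
 * signal fires once when the queue goes from empty to non-empty, after
 * which __do_notify() unregisters the owner and the process must call
 * mq_notify() again. "/notify-demo" is an example name.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got_msg;
 *
 *	static void on_msg(int sig)
 *	{
 *		(void)sig;
 *		got_msg = 1;
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGUSR1,
 *		};
 *		mqd_t q = mq_open("/notify-demo", O_CREAT | O_RDWR, 0600, NULL);
 *
 *		signal(SIGUSR1, on_msg);
 *		if (mq_notify(q, &sev) == -1)
 *			perror("mq_notify");
 *		pause();
 *		printf("notified: %d\n", (int)got_msg);
 *		return 0;
 *	}
 */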

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}
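
/*
 * A short userspace sketch (glibc wrappers assumed) of the one settable
 * flag handled above: toggling O_NONBLOCK on an open descriptor via
 * mq_setattr(), with the previous attributes returned through the third
 * argument. set_nonblock() is a hypothetical helper name.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *
 *	static int set_nonblock(mqd_t q, int on)
 *	{
 *		struct mq_attr newattr = { .mq_flags = on ? O_NONBLOCK : 0 };
 *		struct mq_attr oldattr;
 *
 *		return mq_setattr(q, &newattr, &oldattr);
 *	}
 */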

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);