/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                          Michal Wronski (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"
struct mqueue_fs_context {
        struct ipc_namespace    *ipc_ns;
        bool                     newns; /* Set if newly created ipc namespace */
};

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1

struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE()
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens:
 *
 * Thread A
 *                              Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              wake_q_add(A)
 *                              if (cmpxchg()) // success
 *                                 ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *                              get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data:
 *
 * Thread A
 *                              Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;          // Access to stale data!
 *                              receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct rb_node *msg_tree_rightmost;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct ucounts *ucounts;        /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        bool rightmost = true;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;

        if (rightmost)
                info->msg_tree_rightmost = &leaf->rb_node;

        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
                                  struct mqueue_inode_info *info)
{
        struct rb_node *node = &leaf->rb_node;

        if (info->msg_tree_rightmost == node)
                info->msg_tree_rightmost = rb_prev(node);

        rb_erase(node, &info->msg_tree);
        if (info->node_cache)
                kfree(leaf);
        else
                info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        /*
         * During insert, low priorities go to the left and high to the
         * right.  On receive, we want the highest priorities first, so
         * walk all the way to the right.
         */
        parent = info->msg_tree_rightmost;
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                msg_tree_erase(leaf, info);
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        msg_tree_erase(leaf, info);
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
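
/*
 * Illustrative semantics (not kernel code): because msg_get() always walks
 * to the rightmost tree node and takes the head of that node's list,
 * messages are delivered highest-priority-first, FIFO within equal
 * priority.  E.g. after sends with priorities 1, 5, 5, 3, receives return
 * the two prio-5 messages in send order, then prio 3, then prio 1.
 */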

static struct inode *mqueue_get_inode(struct super_block *sb,
                                      struct ipc_namespace *ipc_ns,
                                      umode_t mode, struct mq_attr *attr)
{
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->ucounts = NULL;   /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->msg_tree_rightmost = NULL;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means min(mq_maxmsg, max_priorities) *
                 * sizeof(struct posix_msg_tree_node).
                 */

                ret = -EINVAL;
                if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
                        goto out_inode;
                if (capable(CAP_SYS_RESOURCE)) {
                        if (info->attr.mq_maxmsg > HARD_MSGMAX ||
                            info->attr.mq_msgsize > HARD_MSGSIZEMAX)
                                goto out_inode;
                } else {
                        if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
                            info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
                                goto out_inode;
                }
                ret = -EOVERFLOW;
                /* check for overflow */
                if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
                        goto out_inode;
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);
                mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
                if (mq_bytes + mq_treesize < mq_bytes)
                        goto out_inode;
                mq_bytes += mq_treesize;
                info->ucounts = get_ucounts(current_ucounts());
                if (info->ucounts) {
                        long msgqueue;

                        spin_lock(&mq_lock);
                        msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                        if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
                                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                                spin_unlock(&mq_lock);
                                put_ucounts(info->ucounts);
                                info->ucounts = NULL;
                                /* mqueue_evict_inode() releases info->messages */
                                ret = -EMFILE;
                                goto out_inode;
                        }
                        spin_unlock(&mq_lock);
                }
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}
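
/*
 * Worked example of the accounting above (illustrative; struct sizes are
 * architecture-dependent): with mq_maxmsg = 10 and mq_msgsize = 8192,
 *
 *      mq_treesize = 10 * sizeof(struct msg_msg) +
 *                    min(10, MQ_PRIO_MAX) * sizeof(struct posix_msg_tree_node);
 *      mq_bytes    = 10 * 8192 + mq_treesize;
 *
 * and mq_bytes is what gets charged against the creator's
 * UCOUNT_RLIMIT_MSGQUEUE ucount for the lifetime of the queue.
 */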

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct ipc_namespace *ns = sb->s_fs_info;

        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        /*
         * With a newly created ipc namespace, we don't need to do a search
         * for an ipc namespace match, but we still need to set s_fs_info.
         */
        if (ctx->newns) {
                fc->s_fs_info = ctx->ipc_ns;
                return get_tree_nodev(fc, mqueue_fill_super);
        }
        return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        put_ipc_ns(ctx->ipc_ns);
        kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx;

        ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
        fc->fs_private = ctx;
        fc->ops = &mqueue_fs_context_ops;
        return 0;
}

/*
 * mq_init_ns() is currently the only caller of mq_create_mount().
 * So the ns parameter is always a newly created ipc namespace.
 */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
        struct mqueue_fs_context *ctx;
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        ctx = fc->fs_private;
        ctx->newns = true;
        put_ipc_ns(ctx->ipc_ns);
        ctx->ipc_ns = get_ipc_ns(ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

        mnt = fc_mount(fc);
        put_fs_context(fc);
        return mnt;
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *)foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
                list_del(&msg->m_list);
                free_msg(msg);
        }

        if (info->ucounts) {
                unsigned long mq_bytes, mq_treesize;

                /* Total amount of bytes accounted for the mqueue */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

                spin_lock(&mq_lock);
                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                put_ucounts(info->ucounts);
                info->ucounts = NULL;
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode;
        struct mq_attr *attr = arg;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
                         struct dentry *dentry, umode_t mode, bool excl)
{
        return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This routine handles reads from a queue file.  To avoid implementing a
 * form of mq_receive here, we allow reading only the queue size and the
 * notification info (the only values that are interesting from the user's
 * point of view and aren't accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                 info->qsize,
                 info->notify_owner ? info->notify.sigev_notify : 0,
                 (info->notify_owner &&
                  info->notify.sigev_notify == SIGEV_SIGNAL) ?
                         info->notify.sigev_signo : 0,
                 pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                      strlen(buffer));
        if (ret <= 0)
                return ret;

        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
        return ret;
}
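
/*
 * Illustrative userspace read of the queue file (a minimal sketch; it
 * assumes mqueue is mounted at the conventional /dev/mqueue and that a
 * queue named "/myq" exists):
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              char buf[80];
 *              int fd = open("/dev/mqueue/myq", O_RDONLY);
 *              ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *              if (n > 0) {
 *                      buf[n] = '\0';
 *                      fputs(buf, stdout);   // "QSIZE:... NOTIFY:... ..."
 *              }
 *              close(fd);
 *              return 0;
 *      }
 */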

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        __poll_t retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = EPOLLIN | EPOLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}
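
/*
 * Illustrative userspace use of the poll support above (a minimal sketch;
 * on Linux an mqd_t is a file descriptor, so it can be passed to poll()):
 *
 *      #include <mqueue.h>
 *      #include <poll.h>
 *
 *      void wait_readable(mqd_t mq)
 *      {
 *              struct pollfd pfd = { .fd = mq, .events = POLLIN };
 *              poll(&pfd, 1, -1);   // POLLIN set once mq_curmsgs > 0
 *      }
 */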

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                   struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->prio <= current->prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
        __releases(&info->lock)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                /* memory barrier not required, we hold info->lock */
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (READ_ONCE(ewp->state) == STATE_READY) {
                        /* see MQ_BARRIER for purpose/pairing */
                        smp_acquire__after_ctrl_dep();
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);

                /* we hold info->lock, so no memory barrier required */
                if (READ_ONCE(ewp->state) == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /*
         * Notification is invoked when a process has registered for it, no
         * process is waiting synchronously for a message, AND the queue has
         * just changed from empty to non-empty.  At this point we can be
         * sure that no one is waiting synchronously.
         */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL: {
                        struct kernel_siginfo sig_i;
                        struct task_struct *task;

                        /* do_mq_notify() accepts sigev_signo == 0, why?? */
                        if (!info->notify.sigev_signo)
                                break;

                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        rcu_read_lock();
                        /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        /*
                         * We can't use kill_pid_info(), this signal should
                         * bypass check_kill_permission(). It is from kernel
                         * but si_fromuser() can't know this.
                         * We do check the self_exec_id, to avoid sending
                         * signals to programs that don't expect them.
                         */
                        task = pid_task(info->notify_owner, PIDTYPE_TGID);
                        if (task && task->self_exec_id ==
                                                info->notify_self_exec_id) {
                                do_send_sig_info(info->notify.sigev_signo,
                                                &sig_i, task, PIDTYPE_TGID);
                        }
                        rcu_read_unlock();
                        break;
                }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
                           struct timespec64 *ts)
{
        if (get_timespec64(ts, u_abs_timeout))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
                        umode_t mode, struct filename *name,
                        struct mq_attr *attr)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if (d_really_is_negative(dentry)) {
                if (!(oflag & O_CREAT))
                        return -ENOENT;
                if (ro)
                        return ro;
                audit_inode_parent_hidden(name, dentry->d_parent);
                return vfs_mkobj(dentry, mode & ~current_umask(),
                                 mqueue_create_attr, attr);
        }
        /* it already existed */
        audit_inode(name, dentry, 0);
        if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                return -EEXIST;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return -EINVAL;
        acc = oflag2acc[oflag & O_ACCMODE];
        return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
                      struct mq_attr *attr)
{
        struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        struct filename *name;
        struct path path;
        int fd, error;
        int ro;

        audit_mq_open(oflag, mode, attr);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        inode_lock(d_inode(root));
        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);
        error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
        if (!error) {
                struct file *file = dentry_open(&path, oflag, current_cred());
                if (!IS_ERR(file))
                        fd_install(fd, file);
                else
                        error = PTR_ERR(file);
        }
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct mq_attr attr;
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
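
/*
 * Illustrative userspace use of this syscall via the librt wrapper (a
 * minimal sketch; the queue name "/myq" and the attributes are arbitrary):
 *
 *      #include <fcntl.h>
 *      #include <mqueue.h>
 *
 *      mqd_t open_queue(void)
 *      {
 *              struct mq_attr attr = {
 *                      .mq_maxmsg  = 10,     // within /proc/sys/fs/mqueue/msg_max
 *                      .mq_msgsize = 1024,   // within /proc/sys/fs/mqueue/msgsize_max
 *              };
 *              return mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *      }
 */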

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_one_len(name->name, mnt->mnt_root,
                                strlen(name->name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
                                 dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        if (inode)
                iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct ext_wait_queue *this)
{
        struct task_struct *task;

        list_del(&this->list);
        task = get_task_struct(this->task);

        /* see MQ_BARRIER for purpose/pairing */
        smp_store_release(&this->state, STATE_READY);
        wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        __pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (we are guaranteed one free slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        __pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                           size_t msg_len, unsigned int msg_prio,
                           struct timespec64 *ts)
{
        struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        DEFINE_WAKE_Q(wake_q);

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * First try to allocate memory, before doing anything with
         * existing queues.
         */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fdput(f);
out:
        return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                              size_t msg_len, unsigned int __user *u_msg_prio,
                              struct timespec64 *ts)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                DEFINE_WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                    store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fdput(f);
out:
        return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
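
/*
 * Illustrative userspace use of the timed syscalls (a minimal sketch): the
 * timeout is an *absolute* CLOCK_REALTIME time, matching the
 * schedule_hrtimeout_range_clock(..., CLOCK_REALTIME) call in wq_sleep().
 *
 *      #include <mqueue.h>
 *      #include <time.h>
 *
 *      // len must be >= the queue's mq_msgsize, or EMSGSIZE is returned.
 *      ssize_t recv_with_timeout(mqd_t mq, char *buf, size_t len, int secs)
 *      {
 *              struct timespec abs;
 *              unsigned int prio;
 *
 *              clock_gettime(CLOCK_REALTIME, &abs);
 *              abs.tv_sec += secs;
 *              return mq_timedreceive(mq, buf, len, &prio, &abs);
 *      }
 */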

/*
 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 * but isn't the current owner of the notification, the request is silently
 * ignored.  POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
        int ret;
        struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        audit_mq_notify(mqdes, notification);

        nc = NULL;
        sock = NULL;
        if (notification != NULL) {
                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
                             notification->sigev_notify != SIGEV_SIGNAL &&
                             notification->sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification->sigev_notify == SIGEV_SIGNAL &&
                    !valid_signal(notification->sigev_signo)) {
                        return -EINVAL;
                }
                if (notification->sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc)
                                return -ENOMEM;

                        if (copy_from_user(nc->data,
                                        notification->sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto free_skb;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        f = fdget(notification->sigev_signo);
                        if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(f.file);
                        fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                goto free_skb;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1) {
                                sock = NULL;
                                goto retry;
                        }
                        if (ret)
                                return ret;
                }
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = current_time(inode);
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification->sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        info->notify_self_exec_id = current->self_exec_id;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = current_time(inode);
        }
        spin_unlock(&info->lock);
out_fput:
        fdput(f);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        else
free_skb:
                dev_kfree_skb(nc);

        return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
                        return -EFAULT;
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}
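
/*
 * Illustrative userspace registration for SIGEV_SIGNAL notification (a
 * minimal sketch): only one process may be registered per queue at a time
 * (-EBUSY otherwise), and __do_notify() above consumes the registration on
 * a single empty->non-empty transition, so it must be re-armed.
 *
 *      #include <mqueue.h>
 *      #include <signal.h>
 *
 *      int arm_notify(mqd_t mq)
 *      {
 *              struct sigevent sev = {
 *                      .sigev_notify = SIGEV_SIGNAL,
 *                      .sigev_signo  = SIGUSR1,
 *              };
 *              return mq_notify(mq, &sev);   // call again after each delivery
 *      }
 */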

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (new && (new->mq_flags & (~O_NONBLOCK)))
                return -EINVAL;

        f = fdget(mqdes);
        if (!f.file)
                return -EBADF;

        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                fdput(f);
                return -EBADF;
        }

        inode = file_inode(f.file);
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        if (old) {
                *old = info->attr;
                old->mq_flags = f.file->f_flags & O_NONBLOCK;
        }
        if (new) {
                audit_mq_getsetattr(mqdes, new);
                spin_lock(&f.file->f_lock);
                if (new->mq_flags & O_NONBLOCK)
                        f.file->f_flags |= O_NONBLOCK;
                else
                        f.file->f_flags &= ~O_NONBLOCK;
                spin_unlock(&f.file->f_lock);

                inode->i_atime = inode->i_ctime = current_time(inode);
        }

        spin_unlock(&info->lock);
        fdput(f);
        return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
                return -EFAULT;
        return 0;
}
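
/*
 * Illustrative userspace use (a minimal sketch): mq_setattr() can only
 * toggle O_NONBLOCK in mq_flags; do_mq_getsetattr() above rejects any other
 * flag with -EINVAL, and maxmsg/msgsize are fixed at queue creation.
 *
 *      #include <fcntl.h>
 *      #include <mqueue.h>
 *
 *      int set_nonblocking(mqd_t mq)
 *      {
 *              struct mq_attr attr = { .mq_flags = O_NONBLOCK };
 *              return mq_setattr(mq, &attr, NULL);
 *      }
 */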

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
        compat_long_t mq_flags;      /* message queue flags */
        compat_long_t mq_maxmsg;     /* maximum number of messages */
        compat_long_t mq_msgsize;    /* maximum message size */
        compat_long_t mq_curmsgs;    /* number of messages currently queued */
        compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
                                     const struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        if (copy_from_user(&v, uattr, sizeof(*uattr)))
                return -EFAULT;

        memset(attr, 0, sizeof(*attr));
        attr->mq_flags = v.mq_flags;
        attr->mq_maxmsg = v.mq_maxmsg;
        attr->mq_msgsize = v.mq_msgsize;
        attr->mq_curmsgs = v.mq_curmsgs;
        return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
                                     struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        memset(&v, 0, sizeof(v));
        v.mq_flags = attr->mq_flags;
        v.mq_maxmsg = attr->mq_maxmsg;
        v.mq_msgsize = attr->mq_msgsize;
        v.mq_curmsgs = attr->mq_curmsgs;
        if (copy_to_user(uattr, &v, sizeof(*uattr)))
                return -EFAULT;
        return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
                       int, oflag, compat_mode_t, mode,
                       struct compat_mq_attr __user *, u_attr)
{
        struct mq_attr attr, *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                p = &attr;
                if (get_compat_mq_attr(&attr, u_attr))
                        return -EFAULT;
        }
        return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                       const struct compat_sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (get_compat_sigevent(&n, u_notification))
                        return -EFAULT;
                if (n.sigev_notify == SIGEV_THREAD)
                        n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                       const struct compat_mq_attr __user *, u_mqstat,
                       struct compat_mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (get_compat_mq_attr(new, u_mqstat))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (put_compat_mq_attr(old, u_omqstat))
                return -EFAULT;
        return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
                                  struct timespec64 *ts)
{
        if (get_old_timespec32(ts, p))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
                const char __user *, u_msg_ptr,
                unsigned int, msg_len, unsigned int, msg_prio,
                const struct old_timespec32 __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = compat_prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
                char __user *, u_msg_ptr,
                unsigned int, msg_len, unsigned int __user *, u_msg_prio,
                const struct old_timespec32 __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = compat_prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .free_inode = mqueue_free_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
        .free = mqueue_fs_context_free,
        .get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .init_fs_context = mqueue_init_fs_context,
        .kill_sb = kill_litter_super,
        .fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
        struct vfsmount *m;

        ns->mq_queues_count = 0;
        ns->mq_queues_max = DFLT_QUEUESMAX;
        ns->mq_msg_max = DFLT_MSGMAX;
        ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
        ns->mq_msg_default = DFLT_MSG;
        ns->mq_msgsize_default = DFLT_MSGSIZE;

        m = mq_create_mount(ns);
        if (IS_ERR(m))
                return PTR_ERR(m);
        ns->mq_mnt = m;
        return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
        kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

device_initcall(init_mqueue_fs);