Lines Matching +full:foo +full:- +full:queue

61 struct ext_wait_queue {		/* queue of sleeping tasks */
88 unsigned long qsize; /* size of queue in memory (sum of all msgs) */
110 return get_ipc_ns(inode->i_sb->s_fs_info); in __get_ns_from_inode()
129 p = &info->msg_tree.rb_node; in msg_insert()
134 if (likely(leaf->priority == msg->m_type)) in msg_insert()
136 else if (msg->m_type < leaf->priority) in msg_insert()
137 p = &(*p)->rb_left; in msg_insert()
139 p = &(*p)->rb_right; in msg_insert()
141 if (info->node_cache) { in msg_insert()
142 leaf = info->node_cache; in msg_insert()
143 info->node_cache = NULL; in msg_insert()
147 return -ENOMEM; in msg_insert()
148 INIT_LIST_HEAD(&leaf->msg_list); in msg_insert()
150 leaf->priority = msg->m_type; in msg_insert()
151 rb_link_node(&leaf->rb_node, parent, p); in msg_insert()
152 rb_insert_color(&leaf->rb_node, &info->msg_tree); in msg_insert()
154 info->attr.mq_curmsgs++; in msg_insert()
155 info->qsize += msg->m_ts; in msg_insert()
156 list_add_tail(&msg->m_list, &leaf->msg_list); in msg_insert()
167 p = &info->msg_tree.rb_node; in msg_get()
175 p = &(*p)->rb_right; in msg_get()
178 if (info->attr.mq_curmsgs) { in msg_get()
179 pr_warn_once("Inconsistency in POSIX message queue, " in msg_get()
182 info->attr.mq_curmsgs = 0; in msg_get()
187 if (unlikely(list_empty(&leaf->msg_list))) { in msg_get()
188 pr_warn_once("Inconsistency in POSIX message queue, " in msg_get()
191 rb_erase(&leaf->rb_node, &info->msg_tree); in msg_get()
192 if (info->node_cache) { in msg_get()
195 info->node_cache = leaf; in msg_get()
199 msg = list_first_entry(&leaf->msg_list, in msg_get()
201 list_del(&msg->m_list); in msg_get()
202 if (list_empty(&leaf->msg_list)) { in msg_get()
203 rb_erase(&leaf->rb_node, &info->msg_tree); in msg_get()
204 if (info->node_cache) { in msg_get()
207 info->node_cache = leaf; in msg_get()
211 info->attr.mq_curmsgs--; in msg_get()
212 info->qsize -= msg->m_ts; in msg_get()
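The msg_insert()/msg_get() fragments above are the queue's storage: messages sit on an rbtree keyed by priority (msg->m_type), each leaf holding a FIFO list for one priority, and msg_get() pops from the rightmost leaf, so the oldest message of the highest priority is delivered first. A minimal userspace sketch of that ordering (the queue name "/prio_demo" is only an example; link with -lrt):

/* Sketch: sends three messages at mixed priorities and shows that
 * mq_receive() returns them highest-priority first, matching the
 * rbtree ordering maintained by msg_insert()/msg_get(). */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
	char buf[64];
	unsigned int prio;
	int i;
	mqd_t q = mq_open("/prio_demo", O_CREAT | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	mq_send(q, "low", 4, 1);
	mq_send(q, "high", 5, 9);
	mq_send(q, "mid", 4, 5);

	for (i = 0; i < 3; i++) {
		if (mq_receive(q, buf, sizeof(buf), &prio) > 0)
			printf("prio %u: %s\n", prio, buf);	/* 9, then 5, then 1 */
	}
	mq_close(q);
	mq_unlink("/prio_demo");
	return 0;
}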
222 int ret = -ENOMEM; in mqueue_get_inode()
228 inode->i_ino = get_next_ino(); in mqueue_get_inode()
229 inode->i_mode = mode; in mqueue_get_inode()
230 inode->i_uid = current_fsuid(); in mqueue_get_inode()
231 inode->i_gid = current_fsgid(); in mqueue_get_inode()
232 inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode); in mqueue_get_inode()
238 inode->i_fop = &mqueue_file_operations; in mqueue_get_inode()
239 inode->i_size = FILENT_SIZE; in mqueue_get_inode()
242 spin_lock_init(&info->lock); in mqueue_get_inode()
243 init_waitqueue_head(&info->wait_q); in mqueue_get_inode()
244 INIT_LIST_HEAD(&info->e_wait_q[0].list); in mqueue_get_inode()
245 INIT_LIST_HEAD(&info->e_wait_q[1].list); in mqueue_get_inode()
246 info->notify_owner = NULL; in mqueue_get_inode()
247 info->notify_user_ns = NULL; in mqueue_get_inode()
248 info->qsize = 0; in mqueue_get_inode()
249 info->user = NULL; /* set when all is ok */ in mqueue_get_inode()
250 info->msg_tree = RB_ROOT; in mqueue_get_inode()
251 info->node_cache = NULL; in mqueue_get_inode()
252 memset(&info->attr, 0, sizeof(info->attr)); in mqueue_get_inode()
253 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, in mqueue_get_inode()
254 ipc_ns->mq_msg_default); in mqueue_get_inode()
255 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, in mqueue_get_inode()
256 ipc_ns->mq_msgsize_default); in mqueue_get_inode()
258 info->attr.mq_maxmsg = attr->mq_maxmsg; in mqueue_get_inode()
259 info->attr.mq_msgsize = attr->mq_msgsize; in mqueue_get_inode()
264 * possible message into the queue size. That's no longer in mqueue_get_inode()
265 * accurate as the queue is now an rbtree and will grow and in mqueue_get_inode()
275 ret = -EINVAL; in mqueue_get_inode()
276 if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0) in mqueue_get_inode()
279 if (info->attr.mq_maxmsg > HARD_MSGMAX || in mqueue_get_inode()
280 info->attr.mq_msgsize > HARD_MSGSIZEMAX) in mqueue_get_inode()
283 if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max || in mqueue_get_inode()
284 info->attr.mq_msgsize > ipc_ns->mq_msgsize_max) in mqueue_get_inode()
287 ret = -EOVERFLOW; in mqueue_get_inode()
289 if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg) in mqueue_get_inode()
291 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + in mqueue_get_inode()
292 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * in mqueue_get_inode()
294 mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize; in mqueue_get_inode()
299 if (u->mq_bytes + mq_bytes < u->mq_bytes || in mqueue_get_inode()
300 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { in mqueue_get_inode()
302 /* mqueue_evict_inode() releases info->messages */ in mqueue_get_inode()
303 ret = -EMFILE; in mqueue_get_inode()
306 u->mq_bytes += mq_bytes; in mqueue_get_inode()
310 info->user = get_uid(u); in mqueue_get_inode()
314 inode->i_size = 2 * DIRENT_SIZE; in mqueue_get_inode()
315 inode->i_op = &mqueue_dir_inode_operations; in mqueue_get_inode()
316 inode->i_fop = &simple_dir_operations; in mqueue_get_inode()
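The mqueue_get_inode() fragments show where a new queue's attributes come from: namespace defaults (mq_msg_default, mq_msgsize_default) when the caller passes no attr, otherwise the caller's mq_maxmsg/mq_msgsize, which must be positive, within the namespace's mq_msg_max/mq_msgsize_max (and the HARD_* ceilings), and whose worst-case footprint (tree nodes plus maxmsg * msgsize) is charged against the owner's RLIMIT_MSGQUEUE, failing with -EMFILE in this version of the code. A sketch of how that surfaces through mq_open() (queue name "/attr_demo" is illustrative only; link with -lrt):

/* Sketch: exercises the attribute checks seen in mqueue_get_inode().
 * Values <= 0 or above /proc/sys/fs/mqueue/{msg_max,msgsize_max} give
 * EINVAL; exceeding RLIMIT_MSGQUEUE shows up as EMFILE here. */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = {
		.mq_maxmsg  = 10,	/* <= /proc/sys/fs/mqueue/msg_max */
		.mq_msgsize = 8192,	/* <= /proc/sys/fs/mqueue/msgsize_max */
	};
	mqd_t q = mq_open("/attr_demo", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");	/* EINVAL, EMFILE, EEXIST, ... */
		return 1;
	}
	mq_close(q);
	mq_unlink("/attr_demo");
	return 0;
}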
329 struct ipc_namespace *ns = sb->s_fs_info; in mqueue_fill_super()
331 sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; in mqueue_fill_super()
332 sb->s_blocksize = PAGE_SIZE; in mqueue_fill_super()
333 sb->s_blocksize_bits = PAGE_SHIFT; in mqueue_fill_super()
334 sb->s_magic = MQUEUE_MAGIC; in mqueue_fill_super()
335 sb->s_op = &mqueue_super_ops; in mqueue_fill_super()
341 sb->s_root = d_make_root(inode); in mqueue_fill_super()
342 if (!sb->s_root) in mqueue_fill_super()
343 return -ENOMEM; in mqueue_fill_super()
356 ns = current->nsproxy->ipc_ns; in mqueue_mount()
358 return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super); in mqueue_mount()
361 static void init_once(void *foo) in init_once() argument
363 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; in init_once()
365 inode_init_once(&p->vfs_inode); in init_once()
375 return &ei->vfs_inode; in mqueue_alloc_inode()
386 call_rcu(&inode->i_rcu, mqueue_i_callback); in mqueue_destroy_inode()
399 if (S_ISDIR(inode->i_mode)) in mqueue_evict_inode()
404 spin_lock(&info->lock); in mqueue_evict_inode()
406 list_add_tail(&msg->m_list, &tmp_msg); in mqueue_evict_inode()
407 kfree(info->node_cache); in mqueue_evict_inode()
408 spin_unlock(&info->lock); in mqueue_evict_inode()
411 list_del(&msg->m_list); in mqueue_evict_inode()
415 user = info->user; in mqueue_evict_inode()
420 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + in mqueue_evict_inode()
421 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * in mqueue_evict_inode()
424 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * in mqueue_evict_inode()
425 info->attr.mq_msgsize); in mqueue_evict_inode()
428 user->mq_bytes -= mq_bytes; in mqueue_evict_inode()
431 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns in mqueue_evict_inode()
436 ipc_ns->mq_queues_count--; in mqueue_evict_inode()
446 struct inode *dir = dentry->d_parent->d_inode; in mqueue_create_attr()
455 error = -EACCES; in mqueue_create_attr()
459 if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && in mqueue_create_attr()
461 error = -ENOSPC; in mqueue_create_attr()
464 ipc_ns->mq_queues_count++; in mqueue_create_attr()
467 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); in mqueue_create_attr()
471 ipc_ns->mq_queues_count--; in mqueue_create_attr()
476 dir->i_size += DIRENT_SIZE; in mqueue_create_attr()
477 dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); in mqueue_create_attr()
499 dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir); in mqueue_unlink()
500 dir->i_size -= DIRENT_SIZE; in mqueue_unlink()
507 * This is routine for system read from queue file.
509 * to read only queue size & notification info (the only values
520 spin_lock(&info->lock); in mqueue_read_file()
522 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", in mqueue_read_file()
523 info->qsize, in mqueue_read_file()
524 info->notify_owner ? info->notify.sigev_notify : 0, in mqueue_read_file()
525 (info->notify_owner && in mqueue_read_file()
526 info->notify.sigev_notify == SIGEV_SIGNAL) ? in mqueue_read_file()
527 info->notify.sigev_signo : 0, in mqueue_read_file()
528 pid_vnr(info->notify_owner)); in mqueue_read_file()
529 spin_unlock(&info->lock); in mqueue_read_file()
530 buffer[sizeof(buffer)-1] = '\0'; in mqueue_read_file()
537 file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp)); in mqueue_read_file()
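mqueue_read_file() is what a plain read() on the queue's file in the mqueue filesystem returns: one line with the byte total of queued messages and the current notification registration, in the QSIZE/NOTIFY/SIGNO/NOTIFY_PID format shown above. A sketch, assuming mqueue is mounted at /dev/mqueue and a queue named /foo exists (both assumptions; `cat /dev/mqueue/foo` prints the same line):

/* Sketch: read the queue's file to get the line formatted in
 * mqueue_read_file():
 *   QSIZE:<bytes> NOTIFY:<sigev_notify> SIGNO:<signo> NOTIFY_PID:<pid> */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[128];
	ssize_t n;
	int fd = open("/dev/mqueue/foo", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, line, sizeof(line) - 1);
	if (n > 0) {
		line[n] = '\0';
		fputs(line, stdout);
	}
	close(fd);
	return 0;
}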
545 spin_lock(&info->lock); in mqueue_flush_file()
546 if (task_tgid(current) == info->notify_owner) in mqueue_flush_file()
549 spin_unlock(&info->lock); in mqueue_flush_file()
558 poll_wait(filp, &info->wait_q, poll_tab); in mqueue_poll_file()
560 spin_lock(&info->lock); in mqueue_poll_file()
561 if (info->attr.mq_curmsgs) in mqueue_poll_file()
564 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) in mqueue_poll_file()
566 spin_unlock(&info->lock); in mqueue_poll_file()
571 /* Adds current to info->e_wait_q[sr] before element with smaller prio */
577 ewp->task = current; in wq_add()
579 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { in wq_add()
580 if (walk->task->prio <= current->prio) { in wq_add()
581 list_add_tail(&ewp->list, &walk->list); in wq_add()
585 list_add_tail(&ewp->list, &info->e_wait_q[sr].list); in wq_add()
589 * Puts current task to sleep. Caller must hold queue lock. After return
595 __releases(&info->lock) in wq_sleep()
605 spin_unlock(&info->lock); in wq_sleep()
609 if (ewp->state == STATE_READY) { in wq_sleep()
613 spin_lock(&info->lock); in wq_sleep()
614 if (ewp->state == STATE_READY) { in wq_sleep()
619 retval = -ERESTARTSYS; in wq_sleep()
623 retval = -ETIMEDOUT; in wq_sleep()
627 list_del(&ewp->list); in wq_sleep()
629 spin_unlock(&info->lock); in wq_sleep()
642 ptr = info->e_wait_q[sr].list.prev; in wq_get_first_waiter()
643 if (ptr == &info->e_wait_q[sr].list) in wq_get_first_waiter()
651 ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; in set_cookie()
661 * waiting synchronously for message AND state of queue changed from in __do_notify()
664 if (info->notify_owner && in __do_notify()
665 info->attr.mq_curmsgs == 1) { in __do_notify()
666 switch (info->notify.sigev_notify) { in __do_notify()
674 if (!info->notify.sigev_signo) in __do_notify()
678 sig_i.si_signo = info->notify.sigev_signo; in __do_notify()
681 sig_i.si_value = info->notify.sigev_value; in __do_notify()
683 /* map current pid/uid into info->owner's namespaces */ in __do_notify()
685 ns_of_pid(info->notify_owner)); in __do_notify()
686 sig_i.si_uid = from_kuid_munged(info->notify_user_ns, in __do_notify()
695 task = pid_task(info->notify_owner, PIDTYPE_TGID); in __do_notify()
696 if (task && task->self_exec_id == in __do_notify()
697 info->notify_self_exec_id) { in __do_notify()
698 do_send_sig_info(info->notify.sigev_signo, in __do_notify()
705 set_cookie(info->notify_cookie, NOTIFY_WOKENUP); in __do_notify()
706 netlink_sendskb(info->notify_sock, info->notify_cookie); in __do_notify()
710 put_pid(info->notify_owner); in __do_notify()
711 put_user_ns(info->notify_user_ns); in __do_notify()
712 info->notify_owner = NULL; in __do_notify()
713 info->notify_user_ns = NULL; in __do_notify()
715 wake_up(&info->wait_q); in __do_notify()
722 return -EFAULT; in prepare_timeout()
724 return -EINVAL; in prepare_timeout()
730 if (info->notify_owner != NULL && in remove_notification()
731 info->notify.sigev_notify == SIGEV_THREAD) { in remove_notification()
732 set_cookie(info->notify_cookie, NOTIFY_REMOVED); in remove_notification()
733 netlink_sendskb(info->notify_sock, info->notify_cookie); in remove_notification()
735 put_pid(info->notify_owner); in remove_notification()
736 put_user_ns(info->notify_user_ns); in remove_notification()
737 info->notify_owner = NULL; in remove_notification()
738 info->notify_user_ns = NULL; in remove_notification()
751 return -ENOENT; in prepare_open()
754 audit_inode_parent_hidden(name, dentry->d_parent); in prepare_open()
761 return -EEXIST; in prepare_open()
763 return -EINVAL; in prepare_open()
771 struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt; in do_mq_open()
772 struct dentry *root = mnt->mnt_root; in do_mq_open()
789 path.dentry = lookup_one_len(name->name, root, strlen(name->name)); in do_mq_open()
822 return -EFAULT; in SYSCALL_DEFINE4()
833 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; in SYSCALL_DEFINE1()
834 struct vfsmount *mnt = ipc_ns->mq_mnt; in SYSCALL_DEFINE1()
840 audit_inode_parent_hidden(name, mnt->mnt_root); in SYSCALL_DEFINE1()
844 inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT); in SYSCALL_DEFINE1()
845 dentry = lookup_one_len(name->name, mnt->mnt_root, in SYSCALL_DEFINE1()
846 strlen(name->name)); in SYSCALL_DEFINE1()
854 err = -ENOENT; in SYSCALL_DEFINE1()
857 err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL); in SYSCALL_DEFINE1()
862 inode_unlock(d_inode(mnt->mnt_root)); in SYSCALL_DEFINE1()
879 * queue spinlock:
881 * - Set pointer to message.
882 * - Queue the receiver task for later wakeup (without the info->lock).
883 * - Update its state to STATE_READY. Now the receiver can continue.
884 * - Wake up the process after the lock is dropped. Should the process wake up
891 /* pipelined_send() - send a message directly to the task waiting in
892 * sys_mq_timedreceive() (without inserting message into a queue).
899 receiver->msg = message; in pipelined_send()
900 list_del(&receiver->list); in pipelined_send()
901 wake_q_add(wake_q, receiver->task); in pipelined_send()
904 * that we can ensure that updating receiver->state is the last in pipelined_send()
907 * yet, at that point we can later have a use-after-free in pipelined_send()
910 receiver->state = STATE_READY; in pipelined_send()
913 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
914 * gets its message and put to the queue (we have one free place for sure). */
922 wake_up_interruptible(&info->wait_q); in pipelined_receive()
925 if (msg_insert(sender->msg, info)) in pipelined_receive()
928 list_del(&sender->list); in pipelined_receive()
929 wake_q_add(wake_q, sender->task); in pipelined_receive()
930 sender->state = STATE_READY; in pipelined_receive()
949 return -EINVAL; in do_mq_timedsend()
960 ret = -EBADF; in do_mq_timedsend()
965 if (unlikely(f.file->f_op != &mqueue_file_operations)) { in do_mq_timedsend()
966 ret = -EBADF; in do_mq_timedsend()
972 if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { in do_mq_timedsend()
973 ret = -EBADF; in do_mq_timedsend()
977 if (unlikely(msg_len > info->attr.mq_msgsize)) { in do_mq_timedsend()
978 ret = -EMSGSIZE; in do_mq_timedsend()
989 msg_ptr->m_ts = msg_len; in do_mq_timedsend()
990 msg_ptr->m_type = msg_prio; in do_mq_timedsend()
997 if (!info->node_cache) in do_mq_timedsend()
1000 spin_lock(&info->lock); in do_mq_timedsend()
1002 if (!info->node_cache && new_leaf) { in do_mq_timedsend()
1004 INIT_LIST_HEAD(&new_leaf->msg_list); in do_mq_timedsend()
1005 info->node_cache = new_leaf; in do_mq_timedsend()
1011 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { in do_mq_timedsend()
1012 if (f.file->f_flags & O_NONBLOCK) { in do_mq_timedsend()
1013 ret = -EAGAIN; in do_mq_timedsend()
1020 * wq_sleep must be called with info->lock held, and in do_mq_timedsend()
1030 /* adds message to the queue */ in do_mq_timedsend()
1036 inode->i_atime = inode->i_mtime = inode->i_ctime = in do_mq_timedsend()
1040 spin_unlock(&info->lock); in do_mq_timedsend()
1073 ret = -EBADF; in do_mq_timedreceive()
1078 if (unlikely(f.file->f_op != &mqueue_file_operations)) { in do_mq_timedreceive()
1079 ret = -EBADF; in do_mq_timedreceive()
1085 if (unlikely(!(f.file->f_mode & FMODE_READ))) { in do_mq_timedreceive()
1086 ret = -EBADF; in do_mq_timedreceive()
1091 if (unlikely(msg_len < info->attr.mq_msgsize)) { in do_mq_timedreceive()
1092 ret = -EMSGSIZE; in do_mq_timedreceive()
1101 if (!info->node_cache) in do_mq_timedreceive()
1104 spin_lock(&info->lock); in do_mq_timedreceive()
1106 if (!info->node_cache && new_leaf) { in do_mq_timedreceive()
1108 INIT_LIST_HEAD(&new_leaf->msg_list); in do_mq_timedreceive()
1109 info->node_cache = new_leaf; in do_mq_timedreceive()
1114 if (info->attr.mq_curmsgs == 0) { in do_mq_timedreceive()
1115 if (f.file->f_flags & O_NONBLOCK) { in do_mq_timedreceive()
1116 spin_unlock(&info->lock); in do_mq_timedreceive()
1117 ret = -EAGAIN; in do_mq_timedreceive()
1129 inode->i_atime = inode->i_mtime = inode->i_ctime = in do_mq_timedreceive()
1132 /* There is now free space in queue. */ in do_mq_timedreceive()
1134 spin_unlock(&info->lock); in do_mq_timedreceive()
1139 ret = msg_ptr->m_ts; in do_mq_timedreceive()
1141 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || in do_mq_timedreceive()
1142 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { in do_mq_timedreceive()
1143 ret = -EFAULT; in do_mq_timedreceive()
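The do_mq_timedsend()/do_mq_timedreceive() fragments enforce the access mode (FMODE_WRITE/FMODE_READ), the size rules (send rejects messages longer than mq_msgsize, receive demands a buffer of at least mq_msgsize, both via -EMSGSIZE), and O_NONBLOCK semantics (-EAGAIN instead of wq_sleep()). A userspace sketch of the timed calls (queue name "/timed_demo" is illustrative; link with -lrt):

/* Sketch: the userspace side of do_mq_timedsend()/do_mq_timedreceive(),
 * with an absolute CLOCK_REALTIME timeout and a receive buffer sized to
 * mq_msgsize as the EMSGSIZE check requires. */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 128 };
	struct timespec deadline;
	char buf[128];			/* must be >= mq_msgsize */
	unsigned int prio;
	mqd_t q = mq_open("/timed_demo", O_CREAT | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1)
		return 1;
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;		/* absolute timeout: give up after ~2s */

	if (mq_timedsend(q, "ping", 5, 0, &deadline) == 0 &&
	    mq_timedreceive(q, buf, sizeof(buf), &prio, &deadline) >= 0)
		printf("got \"%s\" at prio %u\n", buf, prio);

	mq_close(q);
	mq_unlink("/timed_demo");
	return 0;
}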
1200 if (unlikely(notification->sigev_notify != SIGEV_NONE && in do_mq_notify()
1201 notification->sigev_notify != SIGEV_SIGNAL && in do_mq_notify()
1202 notification->sigev_notify != SIGEV_THREAD)) in do_mq_notify()
1203 return -EINVAL; in do_mq_notify()
1204 if (notification->sigev_notify == SIGEV_SIGNAL && in do_mq_notify()
1205 !valid_signal(notification->sigev_signo)) { in do_mq_notify()
1206 return -EINVAL; in do_mq_notify()
1208 if (notification->sigev_notify == SIGEV_THREAD) { in do_mq_notify()
1214 ret = -ENOMEM; in do_mq_notify()
1217 if (copy_from_user(nc->data, in do_mq_notify()
1218 notification->sigev_value.sival_ptr, in do_mq_notify()
1220 ret = -EFAULT; in do_mq_notify()
1228 f = fdget(notification->sigev_signo); in do_mq_notify()
1230 ret = -EBADF; in do_mq_notify()
1257 ret = -EBADF; in do_mq_notify()
1262 if (unlikely(f.file->f_op != &mqueue_file_operations)) { in do_mq_notify()
1263 ret = -EBADF; in do_mq_notify()
1269 spin_lock(&info->lock); in do_mq_notify()
1271 if (info->notify_owner == task_tgid(current)) { in do_mq_notify()
1273 inode->i_atime = inode->i_ctime = current_time(inode); in do_mq_notify()
1275 } else if (info->notify_owner != NULL) { in do_mq_notify()
1276 ret = -EBUSY; in do_mq_notify()
1278 switch (notification->sigev_notify) { in do_mq_notify()
1280 info->notify.sigev_notify = SIGEV_NONE; in do_mq_notify()
1283 info->notify_sock = sock; in do_mq_notify()
1284 info->notify_cookie = nc; in do_mq_notify()
1287 info->notify.sigev_notify = SIGEV_THREAD; in do_mq_notify()
1290 info->notify.sigev_signo = notification->sigev_signo; in do_mq_notify()
1291 info->notify.sigev_value = notification->sigev_value; in do_mq_notify()
1292 info->notify.sigev_notify = SIGEV_SIGNAL; in do_mq_notify()
1293 info->notify_self_exec_id = current->self_exec_id; in do_mq_notify()
1297 info->notify_owner = get_pid(task_tgid(current)); in do_mq_notify()
1298 info->notify_user_ns = get_user_ns(current_user_ns()); in do_mq_notify()
1299 inode->i_atime = inode->i_ctime = current_time(inode); in do_mq_notify()
1301 spin_unlock(&info->lock); in do_mq_notify()
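The do_mq_notify() fragments show the registration rules: only SIGEV_NONE, SIGEV_SIGNAL and SIGEV_THREAD are accepted, exactly one process may own the registration (a second registrant gets -EBUSY), and the owner's tgid and user namespace are recorded so __do_notify() can deliver the signal when a message arrives on a previously empty queue, after which the registration is dropped. A sketch of a SIGEV_SIGNAL registration (queue name "/notify_demo" and the choice of SIGUSR1 are illustrative; link with -lrt):

/* Sketch: register SIGUSR1 delivery via mq_notify(); the kernel side
 * (__do_notify()) sends the signal when a message lands on an empty
 * queue and then clears notify_owner, so it must be re-armed. */
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_msg;

static void on_sigusr1(int sig)
{
	(void)sig;
	got_msg = 1;
}

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGUSR1 };
	mqd_t q = mq_open("/notify_demo", O_CREAT | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1)
		return 1;
	signal(SIGUSR1, on_sigusr1);
	if (mq_notify(q, &sev) == -1)	/* EBUSY if another process owns it */
		perror("mq_notify");

	mq_send(q, "hi", 3, 0);		/* queue was empty: SIGUSR1 is delivered */
	sleep(1);
	printf("notified: %d\n", (int)got_msg);

	mq_close(q);
	mq_unlink("/notify_demo");
	return 0;
}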
1319 return -EFAULT; in SYSCALL_DEFINE2()
1331 if (new && (new->mq_flags & (~O_NONBLOCK))) in do_mq_getsetattr()
1332 return -EINVAL; in do_mq_getsetattr()
1336 return -EBADF; in do_mq_getsetattr()
1338 if (unlikely(f.file->f_op != &mqueue_file_operations)) { in do_mq_getsetattr()
1340 return -EBADF; in do_mq_getsetattr()
1346 spin_lock(&info->lock); in do_mq_getsetattr()
1349 *old = info->attr; in do_mq_getsetattr()
1350 old->mq_flags = f.file->f_flags & O_NONBLOCK; in do_mq_getsetattr()
1354 spin_lock(&f.file->f_lock); in do_mq_getsetattr()
1355 if (new->mq_flags & O_NONBLOCK) in do_mq_getsetattr()
1356 f.file->f_flags |= O_NONBLOCK; in do_mq_getsetattr()
1358 f.file->f_flags &= ~O_NONBLOCK; in do_mq_getsetattr()
1359 spin_unlock(&f.file->f_lock); in do_mq_getsetattr()
1361 inode->i_atime = inode->i_ctime = current_time(inode); in do_mq_getsetattr()
1364 spin_unlock(&info->lock); in do_mq_getsetattr()
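do_mq_getsetattr() only lets userspace change mq_flags, and of those only O_NONBLOCK (anything else fails the check at the top with -EINVAL); mq_maxmsg and mq_msgsize are fixed at creation time and merely reported back. A sketch toggling non-blocking mode with mq_setattr() and hitting the EAGAIN path from the receive code above (queue name "/getset_demo" is illustrative; link with -lrt):

/* Sketch: mq_setattr() honours only O_NONBLOCK in mq_flags and returns
 * the previous attributes; an empty non-blocking queue then fails
 * mq_receive() with EAGAIN instead of sleeping. */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };
	struct mq_attr want = { .mq_flags = O_NONBLOCK };
	struct mq_attr old;
	char buf[64];
	mqd_t q = mq_open("/getset_demo", O_CREAT | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1)
		return 1;
	mq_setattr(q, &want, &old);		/* only O_NONBLOCK is honoured */
	printf("maxmsg=%ld msgsize=%ld curmsgs=%ld\n",
	       old.mq_maxmsg, old.mq_msgsize, old.mq_curmsgs);

	if (mq_receive(q, buf, sizeof(buf), NULL) == -1)
		perror("mq_receive");		/* EAGAIN: queue empty + O_NONBLOCK */

	mq_close(q);
	mq_unlink("/getset_demo");
	return 0;
}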
1380 return -EFAULT; in SYSCALL_DEFINE3()
1390 return -EFAULT; in SYSCALL_DEFINE3()
1397 compat_long_t mq_flags; /* message queue flags */
1410 return -EFAULT; in get_compat_mq_attr()
1413 attr->mq_flags = v.mq_flags; in get_compat_mq_attr()
1414 attr->mq_maxmsg = v.mq_maxmsg; in get_compat_mq_attr()
1415 attr->mq_msgsize = v.mq_msgsize; in get_compat_mq_attr()
1416 attr->mq_curmsgs = v.mq_curmsgs; in get_compat_mq_attr()
1426 v.mq_flags = attr->mq_flags; in put_compat_mq_attr()
1427 v.mq_maxmsg = attr->mq_maxmsg; in put_compat_mq_attr()
1428 v.mq_msgsize = attr->mq_msgsize; in put_compat_mq_attr()
1429 v.mq_curmsgs = attr->mq_curmsgs; in put_compat_mq_attr()
1431 return -EFAULT; in put_compat_mq_attr()
1443 return -EFAULT; in COMPAT_SYSCALL_DEFINE4()
1454 return -EFAULT; in COMPAT_SYSCALL_DEFINE2()
1473 return -EFAULT; in COMPAT_SYSCALL_DEFINE3()
1483 return -EFAULT; in COMPAT_SYSCALL_DEFINE3()
1493 return -EFAULT; in compat_prepare_timeout()
1495 return -EINVAL; in compat_prepare_timeout()
1559 ns->mq_queues_count = 0; in mq_init_ns()
1560 ns->mq_queues_max = DFLT_QUEUESMAX; in mq_init_ns()
1561 ns->mq_msg_max = DFLT_MSGMAX; in mq_init_ns()
1562 ns->mq_msgsize_max = DFLT_MSGSIZEMAX; in mq_init_ns()
1563 ns->mq_msg_default = DFLT_MSG; in mq_init_ns()
1564 ns->mq_msgsize_default = DFLT_MSGSIZE; in mq_init_ns()
1566 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); in mq_init_ns()
1567 if (IS_ERR(ns->mq_mnt)) { in mq_init_ns()
1568 int err = PTR_ERR(ns->mq_mnt); in mq_init_ns()
1569 ns->mq_mnt = NULL; in mq_init_ns()
1577 ns->mq_mnt->mnt_sb->s_fs_info = NULL; in mq_clear_sbinfo()
1582 kern_unmount(ns->mq_mnt); in mq_put_mnt()
1593 return -ENOMEM; in init_mqueue_fs()
1595 /* ignore failures - they are not fatal */ in init_mqueue_fs()