// SPDX-License-Identifier: GPL-2.0
 * SMP-threaded, sysctl's added
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   avoidance)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 * - semncnt and semzcnt are calculated on demand in count_semcnt().
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks.
 * - All work is done by the waker; the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore; it may
 *   have been destroyed already by a concurrent semctl(RMID).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated).
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This makes it possible
 *   to achieve FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
/*
 * PID of the process that last modified the semaphore. For Linux,
 * specifically these are:
 * - semop
 * - semctl, via SETVAL and SETALL.
 * - at task exit when performing undo adjustments (see exit_sem).
 */
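/*
 * For orientation, a minimal user-space sketch (illustrative, not part of
 * this file) exercising the entry points above: semget() ends up in
 * newary(), semctl(SETVAL) in semctl_setval(), and semop() in
 * do_semtimedop(). Note that semctl(2) requires the caller to define
 * union semun.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {		/* the caller must define this union, see semctl(2) */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int sem_demo(void)
{
	union semun arg;
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0)
		return -1;
	arg.val = 1;
	semctl(semid, 0, SETVAL, arg);	/* semval = 1, sem_ctime updated  */
	semop(semid, &op, 1);		/* P(): decrement, may sleep      */
	op.sem_op = 1;
	semop(semid, &op, 1);		/* V(): increment, wakes sleepers */
	semctl(semid, 0, IPC_RMID, arg);/* destroy: freeary() in-kernel   */
	return 0;
}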
	spinlock_t	lock;		/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter;	/* pending single-sop operations
					 * that alter the semaphore */
	struct list_head pending_const;	/* pending single-sop operations
					 * that do not alter the semaphore */

	struct list_head	list_proc;	/* per-process list: all undos
						 * from one process;
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

 *	sem_undo_list.list_proc:
 *	* undo_list->lock for write
 *	* rcu for read
 *	Setting it from non-zero to 0 is a RELEASE, this is ensured by
 *	using smp_store_release().
 *	Testing if it is non-zero is an ACQUIRE, this is ensured by using
 *	smp_load_acquire().
 *	Setting it from 0 to non-zero must be ordered with regards to
 *	this smp_load_acquire(); this is guaranteed because the smp_load_acquire()
 *	is inside a spin_lock() and after a write from 0 to non-zero a
 *	spin_lock()+spin_unlock() is done.
 * 3) current->state:
 *	current->state is set to TASK_INTERRUPTIBLE while holding sem_lock().
/* in sem_init_ns(): */
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);

/* in sem_exit_ns(): */
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
/**
 * unmerge_queues - unmerge queues, if possible.
 */
	if (sma->complex_count)
		return;
	/*
	 * Move all pending operations back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		curr = &sma->sems[q->sops[0].sem_num];
		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
/**
 * merge_queues - merge single semop queues into global queue
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 */
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];

		list_splice_init(&sem->pending_alter, &sma->pending_alter);
	}

/* in sem_rcu_free(): */
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct sem_array *sma = container_of(p, struct sem_array, sem_perm);

	security_sem_free(&sma->sem_perm);
	kvfree(sma);
/*
 * Enter the mode suitable for non-simple operations:
 */
/* in complexmode_enter(): */
	if (sma->use_global_lock > 0) {
		/* already in global mode: just refresh the hysteresis */
		sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
		return;
	}
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}

/* in complexmode_tryleave(): */
	if (sma->complex_count)
		return;
	if (sma->use_global_lock == 1)
		smp_store_release(&sma->use_global_lock, 0);
	else
		sma->use_global_lock--;
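/*
 * Illustrative user-space analogue (not from ipc/sem.c) of the hysteresis
 * above, using pthreads and C11 atomics. `use_global` plays the role of
 * sma->use_global_lock: a complex op arms it, and the next HYSTERESIS
 * simple ops still funnel through the global lock before per-semaphore
 * locking resumes, avoiding thrashing between the two modes. All names
 * here are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>

#define HYSTERESIS 10			/* ~USE_GLOBAL_LOCK_HYSTERESIS */

struct arr {
	pthread_mutex_t global;		/* ~sma->sem_perm.lock */
	pthread_mutex_t per_sem;	/* one shown; the kernel has one per semaphore */
	_Atomic int	use_global;	/* ~sma->use_global_lock */
};

static void complex_op(struct arr *a)
{
	pthread_mutex_lock(&a->global);
	/* ~complexmode_enter(): force simple ops onto the global lock */
	atomic_store_explicit(&a->use_global, HYSTERESIS, memory_order_relaxed);
	/* ... multi-semaphore operation ... */
	pthread_mutex_unlock(&a->global);
}

static void simple_op(struct arr *a)
{
	int g;

	if (atomic_load_explicit(&a->use_global, memory_order_acquire) == 0) {
		pthread_mutex_lock(&a->per_sem);
		/* recheck under the lock, as sem_lock() does */
		if (atomic_load_explicit(&a->use_global,
					 memory_order_acquire) == 0) {
			/* ... fast path: single-semaphore operation ... */
			pthread_mutex_unlock(&a->per_sem);
			return;
		}
		pthread_mutex_unlock(&a->per_sem);
	}

	pthread_mutex_lock(&a->global);
	/* ... operate under the global lock ... */
	g = atomic_load_explicit(&a->use_global, memory_order_relaxed);
	if (g == 1)	/* ~complexmode_tryleave(): RELEASE ends global mode */
		atomic_store_explicit(&a->use_global, 0, memory_order_release);
	else if (g > 1)
		atomic_fetch_sub_explicit(&a->use_global, 1, memory_order_relaxed);
	pthread_mutex_unlock(&a->global);
}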
#define SEM_GLOBAL_LOCK	(-1)

/* in sem_lock(): */
	/* Complex operation - acquire a full lock */
	ipc_lock_object(&sma->sem_perm);

	/*
	 * Only one semaphore affected - try to optimize locking.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	if (!sma->use_global_lock) {
		/*
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);
		/* pairs with smp_store_release() in complexmode_tryleave() */
		if (!smp_load_acquire(&sma->use_global_lock))
			return sops->sem_num;
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * There is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	}
	/*
	 * Not a false alarm: continue in global lock mode; it was set up by
	 * the caller that has set use_global_lock to non-zero.
	 */
	return SEM_GLOBAL_LOCK;
/* in sem_unlock(): */
	if (locknum == SEM_GLOBAL_LOCK) {
		ipc_unlock_object(&sma->sem_perm);
	} else {
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}

/* The caller holds the RCU read lock. */

/* in sem_lock_and_putref(): */
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);

/* in sem_rmid(): */
	ipc_rmid(&sem_ids(ns), &s->sem_perm);

/* in sem_alloc(): the allocation size must not overflow */
	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;
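/*
 * The bound above is checked before the allocation size is computed, so
 * sizeof(*sma) + nsems * sizeof(struct sem) can never overflow. A
 * stand-alone sketch of the same guard (illustrative, simplified types;
 * the helper name is hypothetical):
 */
#include <limits.h>
#include <stddef.h>

static size_t checked_flex_size(size_t hdr, size_t elem, size_t n)
{
	if (n > ((size_t)INT_MAX - hdr) / elem)
		return 0;	/* would overflow: refuse, as sem_alloc() does */
	return hdr + n * elem;	/* header plus flexible array */
}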
/**
 * newary - Create a new semaphore set
 */
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);

	return sma->sem_perm.id;
/* in sem_more_checks(): */
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

/* in ksys_semget(): */
	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;
/**
 * perform_atomic_semop[_slow] - Attempt to perform semaphore
 *                               operations on a given array.
 *
 * Caller blocking is as follows, based on the value indicated by the
 * semaphore operation (sem_op):
 *  (2) 0 (wait-for-zero operation): blocks while semval is non-zero.
 */
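/*
 * Illustrative user-space counterpart of case (2): a sem_op of 0 blocks
 * until semval reaches zero. Sketch only; assumes a semaphore set created
 * as in the earlier example.
 */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int wait_for_zero(int semid)
{
	struct sembuf op = {
		.sem_num = 0,
		.sem_op  = 0,	/* wait-for-zero: blocks while semval != 0  */
		.sem_flg = 0,	/* with IPC_NOWAIT it fails, errno = EAGAIN */
	};

	return semop(semid, &op, 1);	/* 0 on success, -1 + errno on error */
}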
/* in perform_atomic_semop_slow(): */
	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}
		curr->semval = result;
	}

	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}
	return result;
/* in perform_atomic_semop(): */
	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/* First pass: check that the entire operation can succeed. */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */
		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			return -ERANGE;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	/* Second pass: commit the changes. */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}
	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
/* in wake_up_sem_queue_prepare(): */
	sleeper = get_task_struct(q->sleeper);
	smp_store_release(&q->status, error);

/* in unlink_queue(): */
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
/*
 * Note that wait-for-zero operations are handled without restart.
 */
/* in check_restart(): */
	if (!list_empty(&sma->pending_alter))
		return 1;
	if (q->nsops > 1)
		return 1;
	/*
	 * No one else can wait for the new value:
	 * - complex operations always restart.
	 * - wait-for-zero ops are handled separately.
	 * - q is a previously sleeping simple operation that
	 *   altered the array; it must be a decrement, because
	 *   simple increments never sleep.
	 * - If there are older (higher priority) decrements
	 *   in the queue, they have already seen the original
	 *   semval and could not proceed; q only decremented the
	 *   value, thus they won't proceed either.
	 */
/**
 * wake_const_ops - wake up non-alter tasks
 * @semnum: semaphore that was modified
 * @wake_q: lockless wake-queue head.
 *
 * Must be called after a semaphore was set to 0. If complex const operations
 * are pending, it must be called with semnum = -1, as well as with the number
 * of each modified semaphore. The tasks to wake are added to @wake_q; the
 * return code is stored in q->status.
 */
	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

/**
 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 */
	/* first: the per-semaphore queues, if known */
	if (sma->sems[num].semval == 0) {
		got_zero = 1;
		semop_completed |= wake_const_ops(sma, num, wake_q);
	}
	/* otherwise: all semaphores, since the modified set is unknown */
	for (i = 0; i < sma->sem_nsems; i++) {
		if (sma->sems[i].semval == 0) {
			got_zero = 1;
			semop_completed |= wake_const_ops(sma, i, wake_q);
		}
	}
	/* if any modified semaphore hit zero, check the global queue too */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);
/**
 * update_queue - look for tasks that can be completed.
 * @semnum: semaphore that was modified
 * @wake_q: lockless wake-queue head.
 *
 * Must be called after a semaphore was modified; with semnum = -1 as well as
 * with the number of each modified semaphore, if multiple were modified.
 * The tasks to wake are added to @wake_q; the return code is stored in
 * q->status.
 */
	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

	list_for_each_entry_safe(q, tq, pending_list, list) {
		/* If we are scanning the single-sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments never sleep
		 * and decrements cannot succeed at 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);
		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
	}

/**
 * set_semotime - set sem_otime
 */
	if (sops == NULL)
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	else
		sma->sems[sops[0].sem_num].sem_otime =
						ktime_get_real_seconds();
/**
 * do_smart_update - optimized update_queue
 * @wake_q: lockless wake-queue head
 *
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 */
	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		/*
		 * Check the modified semaphores:
		 * - No complex ops, thus all sleeping ops are
		 *   simple single-semaphore operations.
		 * - if we decreased the value, then any sleeping
		 *   waiter still cannot proceed.
		 */
		for (i = 0; i < sma->sem_nsems; i++)
			otime |= update_queue(sma, i, wake_q);
	}
/* in check_qop(): */
	struct sembuf *sop = q->blocking;

	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
		     "The task %s (%d) triggered the difference, watch for misbehavior.\n",
		     current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

/* in count_semcnt(): */
	if (count_zero)
		l = &sma->sems[semnum].pending_const;
	else
		l = &sma->sems[semnum].pending_alter;

	/* all tasks on a per-semaphore list sleep on exactly
	 * one semaphore */

	list_for_each_entry(q, &sma->pending_alter, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
	list_for_each_entry(q, &sma->pending_const, list) {
		semcnt += check_qop(sma, semnum, q, count_zero);
	}
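/*
 * count_semcnt() backs the GETNCNT/GETZCNT queries. User-space sketch
 * (illustrative, not part of this file): how many tasks sleep waiting to
 * decrement vs. waiting for zero.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

void show_waiters(int semid, int semnum)
{
	int ncnt = semctl(semid, semnum, GETNCNT);	/* blocked in sem_op < 0  */
	int zcnt = semctl(semid, semnum, GETZCNT);	/* blocked in sem_op == 0 */

	/* both return -1 with errno set on failure */
	printf("semncnt=%d semzcnt=%d\n", ncnt, zcnt);
}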
/* in freeary(): free a semaphore set and wake up all waiters */
	/* Free the existing undo structures for this semaphore set. */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	sem_unlock(sma, -1);

	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
/* in copy_semid_to_user(): */
	ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
	out.sem_otime = in->sem_otime;
	out.sem_ctime = in->sem_ctime;
	out.sem_nsems = in->sem_nsems;

	default:
		return -EINVAL;

/* in get_semotime(): return the newest sem_otime of the array */
	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
/* in semctl_stat(): */
	audit_ipc_obj(&sma->sem_perm);

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	err = sma->sem_perm.id;

	ipc_unlock_object(&sma->sem_perm);
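/*
 * User-space sketch (illustrative) of the consumer side of semctl_stat():
 * IPC_STAT fills a struct semid_ds with the values assembled above.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

int show_stat(int semid)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };

	if (semctl(semid, 0, IPC_STAT, arg) < 0)
		return -1;
	printf("nsems=%lu otime=%ld ctime=%ld mode=%o\n",
	       (unsigned long)ds.sem_nsems, (long)ds.sem_otime,
	       (long)ds.sem_ctime, ds.sem_perm.mode);
	return 0;
}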
/* in semctl_info(): */
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	if (cmd == SEM_INFO) {
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	}
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
/* in semctl_setval(): */
	if (val > SEMVMX || val < 0)
		return -ERANGE;

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
/* in semctl_main(): GETALL, SETALL, GETVAL, GETPID, GETNCNT, GETZCNT */
	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		/* for large arrays, drop the lock while allocating sem_io */
		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		sem_unlock(sma, -1);
		sem_io = kvmalloc_array(nsems, sizeof(ushort), GFP_KERNEL);
		if (sem_io == NULL) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			return -ENOMEM;
		}
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		err = 0;
		if (copy_to_user(array, sem_io, nsems * sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	case SETALL:
		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		sem_io = kvmalloc_array(nsems, sizeof(ushort), GFP_KERNEL);
		if (sem_io == NULL) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			return -ENOMEM;
		}
		if (copy_from_user(sem_io, array, nsems * sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	err = -EINVAL;
	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		break;
	case GETPID:
		err = pid_vnr(curr->sempid);
		break;
	}
	sem_unlock(sma, -1);
/* in copy_semid_from_user(): */
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	default:
		return -EINVAL;
/* in semctl_down(): IPC_RMID and IPC_SET */
	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				   &semid64->sem_perm, 0);

	err = security_sem_semctl(&sma->sem_perm, cmd);

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
	}

	sem_unlock(sma, -1);
/* in ksys_semctl(): */
	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case SEM_STAT:
	case IPC_STAT:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit: the int value sits in the upper half */
		val = arg >> 32;
#else
		/* 32bit or little-endian 64bit */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		/* fall through to semctl_down() */
	case IPC_RMID:
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
/* in copy_compat_semid_from_user(): */
	if (version == IPC_64)
		return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
	else
		return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);

/* in copy_compat_semid_to_user(): */
	if (version == IPC_64) {
		struct compat_semid64_ds v;
		to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime	 = lower_32_bits(in->sem_otime);
		v.sem_otime_high = upper_32_bits(in->sem_otime);
		v.sem_ctime	 = lower_32_bits(in->sem_ctime);
		v.sem_ctime_high = upper_32_bits(in->sem_ctime);
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_semid_ds v;
		to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
		v.sem_otime = in->sem_otime;
		v.sem_ctime = in->sem_ctime;
		v.sem_nsems = in->sem_nsems;
		return copy_to_user(buf, &v, sizeof(v));
	}

/* in compat_ksys_semctl(): */
	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	if (copy_compat_semid_to_user(p, &semid64, version))
		err = -EFAULT;

	if (copy_compat_semid_from_user(&semid64, p, version))
		return -EFAULT;

	return -EINVAL;
/* in get_undo_list(): lazily allocate the per-task undo list */
	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		refcount_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}

/* in __lookup_undo(): */
	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
				spin_is_locked(&ulp->lock)) {
		if (un->semid == semid)
			return un;
	}
	return NULL;

/* in lookup_undo(): */
	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		/* move to the front for MRU caching of the undo entry */
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
/**
 * find_alloc_undo - lookup (and if not present create) undo array
 *
 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
 * returns an rcu locked object.
 */
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}

	/* step 2: allocate a new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short) * nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: acquire the lock on the semaphore array */
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/* step 4: check for races: did someone else allocate the undo struct? */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize and link the new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
/* in do_semtimedop(): */
	int error = -EINVAL;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}

	if (timeout) {
		if (timeout->tv_sec < 0 || timeout->tv_nsec < 0 ||
		    timeout->tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
	}

	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * A previous alter access appears to have touched the
			 * same semaphore ("appears": the check is only exact
			 * modulo BITS_PER_LONG) - use the dupsop logic.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}
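/*
 * The mask/dup logic above is a conservative duplicate detector: semaphore
 * numbers are hashed into a single unsigned long, so two sops may be
 * flagged as duplicates falsely (modulo BITS_PER_LONG) but never missed.
 * Stand-alone sketch of the same technique (illustrative, hypothetical
 * helper name):
 */
#include <limits.h>
#include <stdbool.h>

#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)	/* ~BITS_PER_LONG */

static bool may_have_duplicates(const unsigned short *nums,
				const short *ops, int n)
{
	unsigned long dup = 0;
	bool dupsop = false;

	for (int i = 0; i < n; i++) {
		unsigned long mask = 1UL << (nums[i] % WORD_BITS);

		if (dup & mask)		/* possible repeat: take the slow path */
			dupsop = true;
		if (ops[i] != 0)	/* only altering ops can conflict */
			dup |= mask;
	}
	return dupsop;
}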
	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out_free;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out_free;
	}

	error = -EIDRM;
	/*
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure that an RMID has since invalidated.
	 * This case can be detected checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) { /* non-blocking successful path */
		do_smart_update(sma, sops, nsops, 1, &wake_q);
		goto out_free;
	}
	if (error < 0) /* non-blocking error path */
		goto out_unlock_free;

	/* We need to sleep: queue the operation. */
	if (nsops == 1) {
		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];

		if (alter) {
			if (sma->complex_count)
				list_add_tail(&queue.list,
					      &sma->pending_alter);
			else
				list_add_tail(&queue.list,
					      &curr->pending_alter);
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		WRITE_ONCE(queue.status, -EINTR);

		/* ... sleep; on wakeup check why we were woken ... */

		if (error != -EINTR) /* woken up with a final status */
			goto out_free;

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock_free;

		/*
		 * If queue.status != -EINTR we were woken up by another
		 * process; repeat the test under sem_lock().
		 */
		if (error != -EINTR)
			goto out_unlock_free;

		if (timeout && jiffies_left == 0)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current)); /* spurious wakeups */
/* in ksys_semtimedop(): */
	if (get_timespec64(&ts, timeout))
		return -EFAULT;

/* in compat_ksys_semtimedop(): */
	if (get_old_timespec32(&ts, timeout))
		return -EFAULT;

/* in copy_semundo(): */
	if (clone_flags & CLONE_SYSVSEM) {
		refcount_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;
/* in exit_sem(): called at task exit, applies the SEM_UNDO adjustments */
	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!refcount_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		int semid = -1;

		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/* wait for freeary() before freeing this ulp */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1)
			continue;

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			sem_unlock(sma, -1);
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform the adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely.
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX).
				 * Linux caps at both 0 and SEMVMX.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}
		/* maybe some queued-up processes were waiting for this */
		sem_unlock(sma, -1);

		kfree_rcu(un, rcu);
	}
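/*
 * User-space sketch (illustrative, not part of this file) of what
 * exit_sem() guards against: a decrement taken with SEM_UNDO is rolled
 * back by the kernel if the process dies without releasing it.
 */
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

void take_with_undo(int semid)
{
	struct sembuf op = {
		.sem_num = 0,
		.sem_op  = -1,
		.sem_flg = SEM_UNDO,	/* records +1 in un->semadj[0] */
	};

	if (semop(semid, &op, 1) == 0)
		exit(0);	/* exit_sem() re-adds 1, clamped to 0..SEMVMX */
}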
/* in sysvipc_sem_proc_show(): one line of /proc/sysvipc/sem */
	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);