/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving FIFO
 *   ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
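
/*
 * Illustrative user-space sketch (not part of this file): a minimal
 * example, assuming nothing beyond the standard SysV API, of the
 * behavior documented above - a decrement that may block (served in
 * FIFO order) and automatic rollback via SEM_UNDO at process exit.
 *
 *	#include <sys/sem.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *		struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
 *		struct sembuf down = { .sem_num = 0, .sem_op = -1,
 *				       .sem_flg = SEM_UNDO };
 *
 *		if (semid < 0)
 *			return 1;
 *		semop(semid, &up, 1);	// V: semval 0 -> 1
 *		semop(semid, &down, 1);	// P: semval 1 -> 0, undone at exit
 *		printf("got the semaphore\n");
 *		semctl(semid, 0, IPC_RMID);
 *		return 0;
 *	}
 */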

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
        int     semval;         /* current value */
        int     sempid;         /* pid of last operation */
        spinlock_t lock;        /* spinlock for fine-grained semtimedop */
        struct list_head pending_alter; /* pending single-sop operations */
                                        /* that alter the semaphore */
        struct list_head pending_const; /* pending single-sop operations */
                                        /* that do not alter the semaphore */
        time_t  sem_otime;      /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;

/* One queue for each sleeping process in the system. */
struct sem_queue {
        struct list_head        list;     /* queue of pending operations */
        struct task_struct      *sleeper; /* this process */
        struct sem_undo         *undo;    /* undo structure */
        int                     pid;      /* process id of requesting process */
        int                     status;   /* completion status of operation */
        struct sembuf           *sops;    /* array of pending operations */
        struct sembuf           *blocking; /* the operation that blocked */
        int                     nsops;    /* number of operations */
        int                     alter;    /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
        struct list_head        list_proc;      /* per-process list: *
                                                 * all undos from one process
                                                 * rcu protected */
        struct rcu_head         rcu;            /* rcu struct for sem_undo */
        struct sem_undo_list    *ulp;           /* back ptr to sem_undo_list */
        struct list_head        list_id;        /* per semaphore array list:
                                                 * all undos for one array */
        int                     semid;          /* semaphore set identifier */
        short                   *semadj;        /* array of adjustments */
                                                /* one per semaphore */
};
132
133 /* sem_undo_list controls shared access to the list of sem_undo structures
134 * that may be shared among all a CLONE_SYSVSEM task group.
135 */
136 struct sem_undo_list {
137 atomic_t refcnt;
138 spinlock_t lock;
139 struct list_head list_proc;
140 };


#define sem_ids(ns)     ((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */

/*
 * Locking:
 *      sem_undo.id_next,
 *      sem_array.complex_count,
 *      sem_array.pending{_alter,_const},
 *      sem_array.sem_undo: global sem_lock() for read/write
 *      sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 *      sem_array.sem_base[i].pending_{const,alter}:
 *              global or semaphore sem_lock() for read/write
 */

#define sc_semmsl       sem_ctls[0]
#define sc_semmns       sem_ctls[1]
#define sc_semopm       sem_ctls[2]
#define sc_semmni       sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
        ns->sc_semmsl = SEMMSL;
        ns->sc_semmns = SEMMNS;
        ns->sc_semopm = SEMOPM;
        ns->sc_semmni = SEMMNI;
        ns->used_sems = 0;
        ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &sem_ids(ns), freeary);
        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init(void)
{
        sem_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/sem",
                                " key semid perms nsems uid gid cuid cgid otime ctime\n",
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
        struct sem_queue *q, *tq;

        /* complex operations still around? */
        if (sma->complex_count)
                return;
        /*
         * We will switch back to simple mode.
         * Move all pending operations back into the per-semaphore
         * queues.
         */
        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                struct sem *curr;
                curr = &sma->sem_base[q->sops[0].sem_num];

                list_add_tail(&q->list, &curr->pending_alter);
        }
        INIT_LIST_HEAD(&sma->pending_alter);
}

/**
 * merge_queues - merge single semop queues into global queue
 * @sma: semaphore array
 *
 * This function merges all per-semaphore queues into the global queue.
 * It is necessary to achieve FIFO ordering for the pending single-sop
 * operations when a multi-semop operation must sleep.
 * Only the alter operations must be moved, the const operations can stay.
 */
static void merge_queues(struct sem_array *sma)
{
        int i;
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = sma->sem_base + i;

                list_splice_init(&sem->pending_alter, &sma->pending_alter);
        }
}

static void sem_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct sem_array *sma = ipc_rcu_to_struct(p);

        security_sem_free(sma);
        ipc_rcu_free(head);
}

/*
 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
 * are only control barriers.
 * The code must pair with spin_unlock(&sem->lock) or
 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
 *
 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
 */
#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()

/*
 * Wait until all currently ongoing simple ops have completed.
 * Caller must own sem_perm.lock.
 * New simple ops cannot start, because simple ops first check
 * that a) sem_perm.lock is free and b) complex_count is 0.
 */
static void sem_wait_array(struct sem_array *sma)
{
        int i;
        struct sem *sem;

        if (sma->complex_count) {
                /* The thread that increased sma->complex_count waited on
                 * all sem->lock locks. Thus we don't need to wait again.
                 */
                return;
        }

        for (i = 0; i < sma->sem_nsems; i++) {
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
        ipc_smp_acquire__after_spin_is_unlocked();
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
{
        struct sem *sem;

        if (nsops != 1) {
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);

                /* And wait until all simple ops that are processed
                 * right now have dropped their locks.
                 */
                sem_wait_array(sma);
                return -1;
        }

        /*
         * Only one semaphore affected - try to optimize locking.
         * The rules are:
         * - optimized locking is possible if no complex operation
         *   is either enqueued or processed right now.
         * - The test for enqueued complex ops is simple:
         *      sma->complex_count != 0
         * - Testing for complex ops that are processed right now is
         *   a bit more difficult. Complex ops acquire the full lock
         *   and first wait until the running simple ops have completed.
         *   (see above)
         *   Thus: If we own a simple lock and the global lock is free
         *   and complex_count is now 0, then it will stay 0 and
         *   thus just locking sem->lock is sufficient.
         */
        sem = sma->sem_base + sops->sem_num;

        if (sma->complex_count == 0) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);

                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
                         * We need a memory barrier with acquire semantics,
                         * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
                        ipc_smp_acquire__after_spin_is_unlocked();

                        /* Now repeat the test of complex_count:
                         * It can't change anymore until we drop sem->lock.
                         * Thus: if it is now 0, then it will stay 0.
                         */
                        if (sma->complex_count == 0) {
                                /* fast path successful! */
                                return sops->sem_num;
                        }
                }
                spin_unlock(&sem->lock);
        }

        /* slow path: acquire the full lock */
        ipc_lock_object(&sma->sem_perm);

        if (sma->complex_count == 0) {
                /* False alarm:
                 * There is no complex operation, thus we can switch
                 * back to the fast path.
                 */
                spin_lock(&sem->lock);
                ipc_unlock_object(&sma->sem_perm);
                return sops->sem_num;
        } else {
                /* Not a false alarm, thus complete the sequence for a
                 * full lock.
                 */
                sem_wait_array(sma);
                return -1;
        }
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
        if (locknum == -1) {
                unmerge_queues(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = sma->sem_base + locknum;
                spin_unlock(&sem->lock);
        }
}

/*
 * sem_lock_(check_) routines are called in the paths where the rwsem
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
                        int id, struct sembuf *sops, int nsops, int *locknum)
{
        struct kern_ipc_perm *ipcp;
        struct sem_array *sma;

        ipcp = ipc_obtain_object(&sem_ids(ns), id);
        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        sma = container_of(ipcp, struct sem_array, sem_perm);
        *locknum = sem_lock(sma, sops, nsops);

        /* ipc_rmid() may have already freed the ID while sem_lock
         * was spinning: verify that the structure is still valid
         */
        if (ipc_valid_object(ipcp))
                return container_of(ipcp, struct sem_array, sem_perm);

        sem_unlock(sma, *locknum);
        return ERR_PTR(-EINVAL);
}

static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
        sem_lock(sma, NULL, -1);
        ipc_rcu_putref(sma, sem_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *      * unlinking the queue entry from the pending list
 *      * setting queue.status to IN_WAKEUP
 *        This is the notification for the blocked thread that a
 *        result value is imminent.
 *      * call wake_up_process
 *      * set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *      * if it's IN_WAKEUP, then it must wait until the value changes
 *      * if it's not -EINTR, then the operation was completed by
 *        update_queue. semtimedop can return queue.status without
 *        performing any operation on the sem array.
 *      * otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP 1
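
/*
 * Sketch (illustrative only) of the waiter side of the protocol above,
 * as seen from a blocked semtimedop() after it wakes up; compare
 * get_queue_result() below, which implements the IN_WAKEUP spin:
 *
 *	error = q->status;
 *	while (error == IN_WAKEUP) {	// waker is between the two stages
 *		cpu_relax();
 *		error = q->status;	// final value is imminent
 *	}
 *	if (error != -EINTR)
 *		return error;		// completed by another task
 *	// else: woken by timeout/signal, must take the lock and check
 */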

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
        int id;
        int retval;
        struct sem_array *sma;
        int size;
        key_t key = params->key;
        int nsems = params->u.nsems;
        int semflg = params->flg;
        int i;

        if (!nsems)
                return -EINVAL;
        if (ns->used_sems + nsems > ns->sc_semmns)
                return -ENOSPC;

        size = sizeof(*sma) + nsems * sizeof(struct sem);
        sma = ipc_rcu_alloc(size);
        if (!sma)
                return -ENOMEM;

        memset(sma, 0, size);

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
                ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }

        sma->sem_base = (struct sem *) &sma[1];

        for (i = 0; i < nsems; i++) {
                INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
                INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
                spin_lock_init(&sma->sem_base[i].lock);
        }

        sma->complex_count = 0;
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();

        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
                ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;

        sem_unlock(sma, -1);
        rcu_read_unlock();

        return sma->sem_perm.id;
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rwsem and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct sem_array *sma;

        sma = container_of(ipcp, struct sem_array, sem_perm);
        if (params->u.nsems > sma->sem_nsems)
                return -EINVAL;

        return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops sem_ops = {
                .getnew = newary,
                .associate = sem_security,
                .more_checks = sem_more_checks,
        };
        struct ipc_params sem_params;

        ns = current->nsproxy->ipc_ns;

        if (nsems < 0 || nsems > ns->sc_semmsl)
                return -EINVAL;

        sem_params.key = key;
        sem_params.flg = semflg;
        sem_params.u.nsems = nsems;

        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
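
/*
 * Illustrative user-space sketch (not part of this file): creating or
 * looking up a set of 3 semaphores with a key derived via ftok(); the
 * path and project id below are made up for the example.
 *
 *	key_t key = ftok("/tmp/myapp", 'S');
 *	int semid = semget(key, 3, IPC_CREAT | 0600);
 *	if (semid < 0)
 *		perror("semget");	// e.g. EINVAL if nsems > SEMMSL
 */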

/**
 * perform_atomic_semop - Perform (if possible) a semaphore operation
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
        int result, sem_op, nsops, pid;
        struct sembuf *sop;
        struct sem *curr;
        struct sembuf *sops;
        struct sem_undo *un;

        sops = q->sops;
        nsops = q->nsops;
        un = q->undo;

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = sma->sem_base + sop->sem_num;
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;

                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;
                        /* Exceeding the undo range is an error. */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                        un->semadj[sop->sem_num] = undo;
                }

                curr->semval = result;
        }

        sop--;
        pid = q->pid;
        while (sop >= sops) {
                sma->sem_base[sop->sem_num].sempid = pid;
                sop--;
        }

        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        q->blocking = sop;

        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sem_op = sop->sem_op;
                sma->sem_base[sop->sem_num].semval -= sem_op;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] += sem_op;
                sop--;
        }

        return result;
}
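
/*
 * Illustrative user-space sketch (not part of this file): both operations
 * below are applied by perform_atomic_semop() as one atomic unit - if the
 * decrement of semaphore 1 would block, the decrement of semaphore 0 is
 * rolled back via the undo: loop above and the caller sleeps.
 *
 *	struct sembuf two_locks[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
 *		{ .sem_num = 1, .sem_op = -1, .sem_flg = 0 },
 *	};
 *	semop(semid, two_locks, 2);	// takes both or neither
 */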

/**
 * wake_up_sem_queue_prepare - prepare wake-up
 * @pt: list of tasks to be woken up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
                                struct sem_queue *q, int error)
{
        if (list_empty(pt)) {
                /*
                 * Hold preempt off so that we don't get preempted and have the
                 * wakee busy-wait until we're scheduled back on.
                 */
                preempt_disable();
        }
        q->status = IN_WAKEUP;
        q->pid = error;

        list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
        struct sem_queue *q, *t;
        int did_something;

        did_something = !list_empty(pt);
        list_for_each_entry_safe(q, t, pt, list) {
                wake_up_process(q->sleeper);
                /* q can disappear immediately after writing q->status. */
                smp_wmb();
                q->status = q->pid;
        }
        if (did_something)
                preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
        list_del(&q->list);
        if (q->nsops > 1)
                sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * modified the array.
 * Note that wait-for-zero operations are handled without restart.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
        /* pending complex alter operations are too difficult to analyse */
        if (!list_empty(&sma->pending_alter))
                return 1;

        /* we were a sleeping complex operation. Too difficult */
        if (q->nsops > 1)
                return 1;

        /* It is impossible that someone waits for the new value:
         * - complex operations always restart.
         * - wait-for-zero are handled separately.
         * - q is a previously sleeping simple operation that
         *   altered the array. It must be a decrement, because
         *   simple increments never sleep.
         * - If there are older (higher priority) decrements
         *   in the queue, then they have observed the original
         *   semval value and couldn't proceed. The operation
         *   decremented the value - thus they won't proceed either.
         */
        return 0;
}

/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
                                struct list_head *pt)
{
        struct sem_queue *q;
        struct list_head *walk;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_const;
        else
                pending_list = &sma->sem_base[semnum].pending_const;

        walk = pending_list->next;
        while (walk != pending_list) {
                int error;

                q = container_of(walk, struct sem_queue, list);
                walk = walk->next;

                error = perform_atomic_semop(sma, q);

                if (error <= 0) {
                        /* operation completed, remove from queue & wakeup */

                        unlink_queue(sma, q);

                        wake_up_sem_queue_prepare(pt, q, error);
                        if (error == 0)
                                semop_completed = 1;
                }
        }
        return semop_completed;
}

/**
 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
                                        int nsops, struct list_head *pt)
{
        int i;
        int semop_completed = 0;
        int got_zero = 0;

        /* first: the per-semaphore queues, if known */
        if (sops) {
                for (i = 0; i < nsops; i++) {
                        int num = sops[i].sem_num;

                        if (sma->sem_base[num].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, num, pt);
                        }
                }
        } else {
                /*
                 * No sops means modified semaphores not known.
                 * Assume all were changed.
                 */
                for (i = 0; i < sma->sem_nsems; i++) {
                        if (sma->sem_base[i].semval == 0) {
                                got_zero = 1;
                                semop_completed |= wake_const_ops(sma, i, pt);
                        }
                }
        }
        /*
         * If one of the modified semaphores got 0,
         * then check the global queue, too.
         */
        if (got_zero)
                semop_completed |= wake_const_ops(sma, -1, pt);

        return semop_completed;
}

/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function internally checks if const operations can now succeed.
 *
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
        struct sem_queue *q;
        struct list_head *walk;
        struct list_head *pending_list;
        int semop_completed = 0;

        if (semnum == -1)
                pending_list = &sma->pending_alter;
        else
                pending_list = &sma->sem_base[semnum].pending_alter;

again:
        walk = pending_list->next;
        while (walk != pending_list) {
                int error, restart;

                q = container_of(walk, struct sem_queue, list);
                walk = walk->next;

                /* If we are scanning the single sop, per-semaphore list of
                 * one semaphore and that semaphore is 0, then it is not
                 * necessary to scan further: simple increments
                 * that affect only one entry succeed immediately and cannot
                 * be in the per semaphore pending queue, and decrements
                 * cannot be successful if the value is already 0.
                 */
                if (semnum != -1 && sma->sem_base[semnum].semval == 0)
                        break;

                error = perform_atomic_semop(sma, q);

                /* Does q->sleeper still need to sleep? */
                if (error > 0)
                        continue;

                unlink_queue(sma, q);

                if (error) {
                        restart = 0;
                } else {
                        semop_completed = 1;
                        do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
                        restart = check_restart(sma, q);
                }

                wake_up_sem_queue_prepare(pt, q, error);
                if (restart)
                        goto again;
        }
        return semop_completed;
}

/**
 * set_semotime - set sem_otime
 * @sma: semaphore array
 * @sops: operations that modified the array, may be NULL
 *
 * sem_otime is replicated to avoid cache line thrashing.
 * This function sets one instance to the current time.
 */
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
        if (sops == NULL) {
                sma->sem_base[0].sem_otime = get_seconds();
        } else {
                sma->sem_base[sops[0].sem_num].sem_otime =
                                                        get_seconds();
        }
}

/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() does the required calls to update_queue and wakeup_zero,
 * based on the actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
                        int otime, struct list_head *pt)
{
        int i;

        otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);

        if (!list_empty(&sma->pending_alter)) {
                /* semaphore array uses the global queue - just process it. */
                otime |= update_queue(sma, -1, pt);
        } else {
                if (!sops) {
                        /*
                         * No sops, thus the modified semaphores are not
                         * known. Check all.
                         */
                        for (i = 0; i < sma->sem_nsems; i++)
                                otime |= update_queue(sma, i, pt);
                } else {
                        /*
                         * Check the semaphores that were increased:
                         * - No complex ops, thus all sleeping ops are
                         *   decrements.
                         * - if we decreased the value, then any sleeping
                         *   semaphore ops won't be able to run: If the
                         *   previous value was too small, then the new
                         *   value will be too small, too.
                         */
                        for (i = 0; i < nsops; i++) {
                                if (sops[i].sem_op > 0) {
                                        otime |= update_queue(sma,
                                                              sops[i].sem_num, pt);
                                }
                        }
                }
        }
        if (otime)
                set_semotime(sma, sops);
}

/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
                        bool count_zero)
{
        struct sembuf *sop = q->blocking;

        /*
         * Linux always (since 0.99.10) reported a task as sleeping on all
         * semaphores. This violates SUS, therefore it was changed to the
         * standard compliant behavior.
         * Give the administrators a chance to notice that an application
         * might misbehave because it relies on the Linux behavior.
         */
        pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
                        "The task %s (%d) triggered the difference, watch for misbehavior.\n",
                        current->comm, task_pid_nr(current));

        if (sop->sem_num != semnum)
                return 0;

        if (count_zero && sop->sem_op == 0)
                return 1;
        if (!count_zero && sop->sem_op < 0)
                return 1;

        return 0;
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 *
 * By definition, a task waits only on the semaphore of the first semop
 * that cannot proceed, even if additional operations would block, too.
 */
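
/*
 * Worked example (illustrative): a task blocked on
 *	{ {0, -1, 0}, {1, 0, 0} }
 * with q->blocking pointing at the first sop is counted in semncnt of
 * semaphore 0 only - it does not contribute to semzcnt of semaphore 1,
 * even though that wait-for-zero op might block as well.
 */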
static int count_semcnt(struct sem_array *sma, ushort semnum,
                        bool count_zero)
{
        struct list_head *l;
        struct sem_queue *q;
        int semcnt;

        semcnt = 0;
        /* First: check the simple operations. They are easy to evaluate */
        if (count_zero)
                l = &sma->sem_base[semnum].pending_const;
        else
                l = &sma->sem_base[semnum].pending_alter;

        list_for_each_entry(q, l, list) {
                /* all tasks on a per-semaphore list sleep on exactly
                 * that semaphore
                 */
                semcnt++;
        }

        /* Then: check the complex operations. */
        list_for_each_entry(q, &sma->pending_alter, list) {
                semcnt += check_qop(sma, semnum, q, count_zero);
        }
        if (count_zero) {
                list_for_each_entry(q, &sma->pending_const, list) {
                        semcnt += check_qop(sma, semnum, q, count_zero);
                }
        }
        return semcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct sem_undo *un, *tu;
        struct sem_queue *q, *tq;
        struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
        struct list_head tasks;
        int i;

        /* Free the existing undo structures for this semaphore set. */
        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
                list_del(&un->list_id);
                spin_lock(&un->ulp->lock);
                un->semid = -1;
                list_del_rcu(&un->list_proc);
                spin_unlock(&un->ulp->lock);
                kfree_rcu(un, rcu);
        }

        /* Wake up all pending processes and let them fail with EIDRM. */
        INIT_LIST_HEAD(&tasks);
        list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
        }

        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
                unlink_queue(sma, q);
                wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
        }
        for (i = 0; i < sma->sem_nsems; i++) {
                struct sem *sem = sma->sem_base + i;
                list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
                }
                list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
                        unlink_queue(sma, q);
                        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
                }
        }

        /* Remove the semaphore set from the IDR */
        sem_rmid(ns, sma);
        sem_unlock(sma, -1);
        rcu_read_unlock();

        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
        ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime = in->sem_otime;
                out.sem_ctime = in->sem_ctime;
                out.sem_nsems = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static time_t get_semotime(struct sem_array *sma)
{
        int i;
        time_t res;

        res = sma->sem_base[0].sem_otime;
        for (i = 1; i < sma->sem_nsems; i++) {
                time_t to = sma->sem_base[i].sem_otime;

                if (to > res)
                        res = to;
        }
        return res;
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
                         int cmd, int version, void __user *p)
{
        int err;
        struct sem_array *sma;

        switch (cmd) {
        case IPC_INFO:
        case SEM_INFO:
        {
                struct seminfo seminfo;
                int max_id;

                err = security_sem_semctl(NULL, cmd);
                if (err)
                        return err;

                memset(&seminfo, 0, sizeof(seminfo));
                seminfo.semmni = ns->sc_semmni;
                seminfo.semmns = ns->sc_semmns;
                seminfo.semmsl = ns->sc_semmsl;
                seminfo.semopm = ns->sc_semopm;
                seminfo.semvmx = SEMVMX;
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
                down_read(&sem_ids(ns).rwsem);
                if (cmd == SEM_INFO) {
                        seminfo.semusz = sem_ids(ns).in_use;
                        seminfo.semaem = ns->used_sems;
                } else {
                        seminfo.semusz = SEMUSZ;
                        seminfo.semaem = SEMAEM;
                }
                max_id = ipc_get_maxid(&sem_ids(ns));
                up_read(&sem_ids(ns).rwsem);
                if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case IPC_STAT:
        case SEM_STAT:
        {
                struct semid64_ds tbuf;
                int id = 0;

                memset(&tbuf, 0, sizeof(tbuf));

                rcu_read_lock();
                if (cmd == SEM_STAT) {
                        sma = sem_obtain_object(ns, semid);
                        if (IS_ERR(sma)) {
                                err = PTR_ERR(sma);
                                goto out_unlock;
                        }
                        id = sma->sem_perm.id;
                } else {
                        sma = sem_obtain_object_check(ns, semid);
                        if (IS_ERR(sma)) {
                                err = PTR_ERR(sma);
                                goto out_unlock;
                        }
                }

                err = -EACCES;
                if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
                        goto out_unlock;

                err = security_sem_semctl(sma, cmd);
                if (err)
                        goto out_unlock;

                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime = get_semotime(sma);
                tbuf.sem_ctime = sma->sem_ctime;
                tbuf.sem_nsems = sma->sem_nsems;
                rcu_read_unlock();
                if (copy_semid_to_user(p, &tbuf, version))
                        return -EFAULT;
                return id;
        }
        default:
                return -EINVAL;
        }
out_unlock:
        rcu_read_unlock();
        return err;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
                unsigned long arg)
{
        struct sem_undo *un;
        struct sem_array *sma;
        struct sem *curr;
        int err;
        struct list_head tasks;
        int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
        /* big-endian 64bit */
        val = arg >> 32;
#else
        /* 32bit or little-endian 64bit */
        val = arg;
#endif

        if (val > SEMVMX || val < 0)
                return -ERANGE;

        INIT_LIST_HEAD(&tasks);

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        if (semnum < 0 || semnum >= sma->sem_nsems) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
                rcu_read_unlock();
                return -EACCES;
        }

        err = security_sem_semctl(sma, SETVAL);
        if (err) {
                rcu_read_unlock();
                return -EACCES;
        }

        sem_lock(sma, NULL, -1);

        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                return -EIDRM;
        }

        curr = &sma->sem_base[semnum];

        ipc_assert_locked_object(&sma->sem_perm);
        list_for_each_entry(un, &sma->list_id, list_id)
                un->semadj[semnum] = 0;

        curr->semval = val;
        curr->sempid = task_tgid_vnr(current);
        sma->sem_ctime = get_seconds();
        /* maybe some queued-up processes were waiting for this */
        do_smart_update(sma, NULL, 0, 0, &tasks);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_sem_queue_do(&tasks);
        return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int cmd, void __user *p)
{
        struct sem_array *sma;
        struct sem *curr;
        int err, nsems;
        ushort fast_sem_io[SEMMSL_FAST];
        ushort *sem_io = fast_sem_io;
        struct list_head tasks;

        INIT_LIST_HEAD(&tasks);

        rcu_read_lock();
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return PTR_ERR(sma);
        }

        nsems = sma->sem_nsems;

        err = -EACCES;
        if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
                goto out_rcu_wakeup;

        err = security_sem_semctl(sma, cmd);
        if (err)
                goto out_rcu_wakeup;

        err = -EACCES;
        switch (cmd) {
        case GETALL:
        {
                ushort __user *array = p;
                int i;

                sem_lock(sma, NULL, -1);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }
                if (nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(sma)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(sma, sem_rcu_free);
                                return -ENOMEM;
                        }

                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (!ipc_valid_object(&sma->sem_perm)) {
                                err = -EIDRM;
                                goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
                        sem_io[i] = sma->sem_base[i].semval;
                sem_unlock(sma, -1);
                rcu_read_unlock();
                err = 0;
                if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
                        err = -EFAULT;
                goto out_free;
        }
        case SETALL:
        {
                int i;
                struct sem_undo *un;

                if (!ipc_rcu_getref(sma)) {
                        err = -EIDRM;
                        goto out_rcu_wakeup;
                }
                rcu_read_unlock();

                if (nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if (sem_io == NULL) {
                                ipc_rcu_putref(sma, sem_rcu_free);
                                return -ENOMEM;
                        }
                }

                if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
                        ipc_rcu_putref(sma, sem_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
                                ipc_rcu_putref(sma, sem_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (!ipc_valid_object(&sma->sem_perm)) {
                        err = -EIDRM;
                        goto out_unlock;
                }

                for (i = 0; i < nsems; i++)
                        sma->sem_base[i].semval = sem_io[i];

                ipc_assert_locked_object(&sma->sem_perm);
                list_for_each_entry(un, &sma->list_id, list_id) {
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
                }
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
                do_smart_update(sma, NULL, 0, 0, &tasks);
                err = 0;
                goto out_unlock;
        }
        /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
        }
        err = -EINVAL;
        if (semnum < 0 || semnum >= nsems)
                goto out_rcu_wakeup;

        sem_lock(sma, NULL, -1);
        if (!ipc_valid_object(&sma->sem_perm)) {
                err = -EIDRM;
                goto out_unlock;
        }
        curr = &sma->sem_base[semnum];

        switch (cmd) {
        case GETVAL:
                err = curr->semval;
                goto out_unlock;
        case GETPID:
                err = curr->sempid;
                goto out_unlock;
        case GETNCNT:
                err = count_semcnt(sma, semnum, 0);
                goto out_unlock;
        case GETZCNT:
                err = count_semcnt(sma, semnum, 1);
                goto out_unlock;
        }

out_unlock:
        sem_unlock(sma, -1);
out_rcu_wakeup:
        rcu_read_unlock();
        wake_up_sem_queue_do(&tasks);
out_free:
        if (sem_io != fast_sem_io)
                ipc_free(sem_io, sizeof(ushort)*nsems);
        return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct semid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->sem_perm.uid = tbuf_old.sem_perm.uid;
                out->sem_perm.gid = tbuf_old.sem_perm.gid;
                out->sem_perm.mode = tbuf_old.sem_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
                       int cmd, int version, void __user *p)
{
        struct sem_array *sma;
        int err;
        struct semid64_ds semid64;
        struct kern_ipc_perm *ipcp;

        if (cmd == IPC_SET) {
                if (copy_semid_from_user(&semid64, p, version))
                        return -EFAULT;
        }

        down_write(&sem_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
                                      &semid64.sem_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        sma = container_of(ipcp, struct sem_array, sem_perm);

        err = security_sem_semctl(sma, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                sem_lock(sma, NULL, -1);
                /* freeary unlocks the ipc object and rcu */
                freeary(ns, ipcp);
                goto out_up;
        case IPC_SET:
                sem_lock(sma, NULL, -1);
                err = ipc_update_perm(&semid64.sem_perm, ipcp);
                if (err)
                        goto out_unlock0;
                sma->sem_ctime = get_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        sem_unlock(sma, -1);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&sem_ids(ns).rwsem);
        return err;
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
        int version;
        struct ipc_namespace *ns;
        void __user *p = (void __user *)arg;

        if (semid < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case SEM_INFO:
        case IPC_STAT:
        case SEM_STAT:
                return semctl_nolock(ns, semid, cmd, version, p);
        case GETALL:
        case GETVAL:
        case GETPID:
        case GETNCNT:
        case GETZCNT:
        case SETALL:
                return semctl_main(ns, semid, semnum, cmd, p);
        case SETVAL:
                return semctl_setval(ns, semid, semnum, arg);
        case IPC_RMID:
        case IPC_SET:
                return semctl_down(ns, semid, cmd, version, p);
        default:
                return -EINVAL;
        }
}
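
/*
 * Illustrative user-space sketch (not part of this file): per SUSv3 the
 * caller must define union semun itself before using the value argument
 * of semctl().
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *	};
 *
 *	union semun arg = { .val = 1 };
 *	semctl(semid, 0, SETVAL, arg);		// updates sem_ctime
 *	int v = semctl(semid, 0, GETVAL, arg);	// v == 1
 */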

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
        struct sem_undo_list *undo_list;

        undo_list = current->sysvsem.undo_list;
        if (!undo_list) {
                undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
                if (undo_list == NULL)
                        return -ENOMEM;
                spin_lock_init(&undo_list->lock);
                atomic_set(&undo_list->refcnt, 1);
                INIT_LIST_HEAD(&undo_list->list_proc);

                current->sysvsem.undo_list = undo_list;
        }
        *undo_listp = undo_list;
        return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
        struct sem_undo *un;

        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
                if (un->semid == semid)
                        return un;
        }
        return NULL;
}

/* Look up the undo structure for semid and, if found, move it to the
 * front of the per-process list so that repeated lookups stay cheap.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
        struct sem_undo *un;

        assert_spin_locked(&ulp->lock);

        un = __lookup_undo(ulp, semid);
        if (un) {
                list_del_rcu(&un->list_proc);
                list_add_rcu(&un->list_proc, &ulp->list_proc);
        }
        return un;
}

/**
 * find_alloc_undo - lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
        struct sem_array *sma;
        struct sem_undo_list *ulp;
        struct sem_undo *un, *new;
        int nsems, error;

        error = get_undo_list(&ulp);
        if (error)
                return ERR_PTR(error);

        rcu_read_lock();
        spin_lock(&ulp->lock);
        un = lookup_undo(ulp, semid);
        spin_unlock(&ulp->lock);
        if (likely(un != NULL))
                goto out;

        /* no undo structure around - allocate one. */
        /* step 1: figure out the size of the semaphore array */
        sma = sem_obtain_object_check(ns, semid);
        if (IS_ERR(sma)) {
                rcu_read_unlock();
                return ERR_CAST(sma);
        }

        nsems = sma->sem_nsems;
        if (!ipc_rcu_getref(sma)) {
                rcu_read_unlock();
                un = ERR_PTR(-EIDRM);
                goto out;
        }
        rcu_read_unlock();

        /* step 2: allocate new undo structure */
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
                ipc_rcu_putref(sma, sem_rcu_free);
                return ERR_PTR(-ENOMEM);
        }

        /* step 3: Acquire the lock on semaphore array */
        rcu_read_lock();
        sem_lock_and_putref(sma);
        if (!ipc_valid_object(&sma->sem_perm)) {
                sem_unlock(sma, -1);
                rcu_read_unlock();
                kfree(new);
                un = ERR_PTR(-EIDRM);
                goto out;
        }
        spin_lock(&ulp->lock);

        /*
         * step 4: check for races: did someone else allocate the undo struct?
         */
        un = lookup_undo(ulp, semid);
        if (un) {
                kfree(new);
                goto success;
        }
        /* step 5: initialize & link new undo structure */
        new->semadj = (short *) &new[1];
        new->ulp = ulp;
        new->semid = semid;
        assert_spin_locked(&ulp->lock);
        list_add_rcu(&new->list_proc, &ulp->list_proc);
        ipc_assert_locked_object(&sma->sem_perm);
        list_add(&new->list_id, &sma->list_id);
        un = new;

success:
        spin_unlock(&ulp->lock);
        sem_unlock(sma, -1);
out:
        return un;
}

/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
        int error;

        error = q->status;
        while (unlikely(error == IN_WAKEUP)) {
                cpu_relax();
                error = q->status;
        }

        return error;
}

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	/*
	 * We eventually might perform the following check in a lockless
	 * fashion, considering ipc_valid_object() locking constraints.
	 * If nsops == 1 and there is no contention for sem_perm.lock, then
	 * only a per-semaphore lock is held and it's OK to proceed with the
	 * check below. More details on the fine-grained locking scheme
	 * entangled here, and on why it is RMID race safe, are in the
	 * comments at sem_lock().
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID,
	 * and now a new array has received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence
	 * of "un" itself is guaranteed by rcu.
	 */
	if (un && un->semid == -1)
		goto out_unlock_free;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* If the operation was successful, then do
		 * the required updates.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &tasks);
		else
			set_semotime(sma, sops);
	}
	if (error <= 0)
		goto out_unlock_free;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
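	/*
	 * Queue placement mirrors the two-level pending lists: a single-sop
	 * operation goes on the per-semaphore pending_alter/pending_const
	 * list of the semaphore it touches (except that an altering sop
	 * must use the global list while complex operations are pending),
	 * whereas a multi-sop operation always goes on the per-array lists
	 * and bumps complex_count.
	 */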

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform an smp_mb(): user space may assume that semop()
		 * is a memory barrier: without the mb(), the CPU could
		 * speculatively read stale user-space data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is
	 * ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we were woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR)
		goto out_unlock_free;

	/*
	 * We were interrupted: we have to clean up the queue.
	 * If the timeout expired, report -EAGAIN instead of -EINTR.
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry.
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
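
/*
 * Example (user space, illustrative only - not kernel code): a classic
 * P ("down") operation on semaphore 0 with SEM_UNDO, so the kernel
 * reverts the decrement via the semadj mechanism if the process dies:
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1,
 *			      .sem_flg = SEM_UNDO };
 *	if (semop(semid, &sop, 1) == -1)
 *		perror("semop");
 *
 * With semtimedop(), the same call additionally takes a timespec, e.g.
 * { .tv_sec = 1, .tv_nsec = 0 }, and fails with EAGAIN once it expires
 * (see the jiffies_left handling above).
 */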

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
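
/*
 * Example (user space, illustrative only): a task created with
 * CLONE_SYSVSEM shares its parent's undo list, so SEM_UNDO adjustments
 * made by either task are applied only once, when the last sharer exits:
 *
 *	pid = clone(worker_fn, stack_top,
 *		    CLONE_VM | CLONE_SYSVSEM | SIGCHLD, arg);
 *
 * worker_fn, stack_top and arg are placeholders for the caller's own
 * thread function, stack and argument. Without CLONE_SYSVSEM the child
 * starts with an undo list of its own.
 */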

/*
 * Add semadj values to semaphores, free undo structures.
 * Undo structures are not freed when semaphore arrays are destroyed,
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval,
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

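	/*
	 * Walk the undo list one entry at a time: each iteration looks up
	 * the entry's semaphore array afresh under RCU, since any array
	 * may have been removed (IPC_RMID) while we were not holding its
	 * lock.
	 */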
	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * We must wait for freeary() before freeing this ulp,
			 * in case we raced with the last sem_undo. There is
			 * a small window in which we exit before freeary()
			 * has finished unlocking the sem_undo_list.
			 */
			spin_unlock_wait(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 * Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}
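
/*
 * Worked example of the semadj mechanism applied above (illustrative):
 * a process performs semop() with sem_op = -2 and SEM_UNDO on a
 * semaphore whose value is 3; the kernel records semadj = +2 for it.
 * If the process exits without reversing the operation itself,
 * exit_sem() adds the +2 back, restoring the value to 3 (clamped to
 * the 0..SEMVMX range as shown above).
 */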

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;
	time_t sem_otime;

	/*
	 * The proc interface isn't aware of sem_lock(), it calls
	 * ipc_lock_object() directly (in sysvipc_find_ipc).
	 * In order to stay compatible with sem_lock(), we must wait until
	 * all simple semop() calls have left their critical regions.
	 */
	sem_wait_array(sma);

	sem_otime = get_semotime(sma);

	return seq_printf(s,
			  "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sem_otime,
			  sma->sem_ctime);
}
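
/*
 * For reference, each array produces one line in /proc/sysvipc/sem in
 * the format above, e.g. (hypothetical values):
 *
 *	0x4d0041  32769  666      1 1000 1000 1000 1000 1431171679 1431171678
 *
 * i.e. key, semid, perms, nsems, uid, gid, cuid, cgid, otime, ctime.
 * The column header itself comes from the string passed to
 * ipc_init_proc_interface() at init time, not from this function.
 */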
#endif