/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 * Further wakeup optimizations, documentation
 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Implementation notes: (May 2010)
 * This file implements System V semaphores.
 *
 * User space visible behavior:
 * - FIFO ordering for semop() operations (just FIFO, not starvation
 *   protection)
 * - multiple semaphore operations that alter the same semaphore in
 *   one semop() are handled.
 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
 *   SETALL calls.
 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
 * - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
 *   to /proc/sys/kernel/sem.
 * - statistics about the usage are reported in /proc/sysvipc/sem.
 *
 * Internals:
 * - scalability:
 *   - all global variables are read-mostly.
 *   - semop() calls and semctl(RMID) are synchronized by RCU.
 *   - most operations do write operations (actually: spin_lock calls) to
 *     the per-semaphore array structure.
 *     Thus: Perfect SMP scaling between independent semaphore arrays.
 *     If multiple semaphores in one array are used, then cache line
 *     thrashing on the semaphore array spinlock will limit the scaling.
 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
 *   count_semzcnt().
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
 *   (see update_queue())
 * - To improve the scalability, the actual wake-up calls are performed after
 *   dropping all locks. (see wake_up_sem_queue_prepare(),
 *   wake_up_sem_queue_do())
 * - All work is done by the waker, the woken up task does not have to do
 *   anything - not even acquiring a lock or dropping a refcount.
 * - A woken up task may not even touch the semaphore array anymore, it may
 *   have been destroyed already by a semctl(RMID).
 * - The synchronization between wake-ups due to a timeout/signal and a
 *   wake-up due to a completed semaphore operation is achieved by using an
 *   intermediate state (IN_WAKEUP).
 * - UNDO values are stored in an array (one per process and per
 *   semaphore array, lazily allocated). For backwards compatibility, multiple
 *   modes for the UNDO variables are supported (per process, per thread)
 *   (see copy_semundo, CLONE_SYSVSEM)
 * - There are two lists of the pending operations: a per-array list
 *   and a per-semaphore list (stored in the array). This allows achieving
 *   FIFO ordering without always scanning all pending operations.
 *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
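
/*
 * Illustrative user-space view of the behavior described above (a
 * documentation sketch, not kernel code): create a one-semaphore array,
 * perform an increment and a decrement with SEM_UNDO, then remove it.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	struct sembuf up   = { .sem_num = 0, .sem_op =  1, .sem_flg = SEM_UNDO };
 *	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semop(semid, &up, 1);
 *	semop(semid, &down, 1);
 *	semctl(semid, 0, IPC_RMID);
 */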

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	int	sempid;		/* pid of last operation */
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head sem_pending; /* pending single-sop operations */
};

/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	int			pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	int			nsops;	 /* number of operations */
	int			alter;	 /* does *sops alter the array? */
};

/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
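
/*
 * Worked example of the semadj bookkeeping (illustration only): a task
 * that performs semop() with sem_op = -1 and SEM_UNDO gets
 * semadj[sem_num] += 1 (see try_atomic_semop()); at exit_sem() the +1 is
 * added back to semval, so a crashed task cannot leave the semaphore
 * "locked" forever. The applied adjustment is clamped to 0..SEMVMX.
 */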

/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	atomic_t		refcnt;
	spinlock_t		lock;
	struct list_head	list_proc;
};


#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

static void sem_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct sem_array *sma = ipc_rcu_to_struct(p);

	security_sem_free(sma);
	ipc_rcu_free(head);
}

/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Carefully guard against sma->complex_count changing between zero
 * and non-zero while we are spinning for the lock. The value of
 * sma->complex_count cannot change while we are holding the lock,
 * so sem_unlock should be fine.
 *
 * The global lock path checks that all the local locks have been released,
 * checking each local lock once. This means that the local lock paths
 * cannot start their critical sections while the global lock is held.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	int locknum;
 again:
	if (nsops == 1 && !sma->complex_count) {
		struct sem *sem = sma->sem_base + sops->sem_num;

		/* Lock just the semaphore we are interested in. */
		spin_lock(&sem->lock);

		/*
		 * If sma->complex_count was set while we were spinning,
		 * we may need to look at things we did not lock here.
		 */
		if (unlikely(sma->complex_count)) {
			spin_unlock(&sem->lock);
			goto lock_array;
		}

		/*
		 * Another process is holding the global lock on the
		 * sem_array; we cannot enter our critical section,
		 * but have to wait for the global lock to be released.
		 */
		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
			spin_unlock(&sem->lock);
			spin_unlock_wait(&sma->sem_perm.lock);
			goto again;
		}

		locknum = sops->sem_num;
	} else {
		int i;
		/*
		 * Lock the semaphore array, and wait for all of the
		 * individual semaphore locks to go away.  The code
		 * above ensures no new single-lock holders will enter
		 * their critical section while the array lock is held.
		 */
 lock_array:
		spin_lock(&sma->sem_perm.lock);
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *sem = sma->sem_base + i;
			spin_unlock_wait(&sem->lock);
		}
		locknum = -1;
	}
	return locknum;
}

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		spin_unlock(&sma->sem_perm.lock);
	} else {
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}

/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 *
 * The caller holds the RCU read lock.
 */
static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
			int id, struct sembuf *sops, int nsops, int *locknum)
{
	struct kern_ipc_perm *ipcp;
	struct sem_array *sma;

	ipcp = ipc_obtain_object(&sem_ids(ns), id);
	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);
	*locknum = sem_lock(sma, sops, nsops);

	/* ipc_rmid() may have already freed the ID while sem_lock
	 * was spinning: verify that the structure is still valid
	 */
	if (!ipcp->deleted)
		return container_of(ipcp, struct sem_array, sem_perm);

	sem_unlock(sma, *locknum);
	return ERR_PTR(-EINVAL);
}

static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(sma, ipc_rcu_free);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
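
/*
 * Waiter-side sketch of the two-stage handshake above (illustration
 * only; the real code is get_queue_result() and the sleep loop in
 * semtimedop()):
 *
 *	error = queue.status;
 *	while (error == IN_WAKEUP) {
 *		cpu_relax();
 *		error = queue.status;
 *	}
 *	if (error != -EINTR)
 *		return error;
 *	... otherwise take the lock and inspect the queue ...
 */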

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return retval;
	}

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
		spin_lock_init(&sma->sem_base[i].lock);
	}

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		ipc_rcu_putref(sma, sem_rcu_free);
		return id;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
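
/*
 * Layout note (illustration): ipc_rcu_alloc() above returns one
 * allocation that holds the sem_array header immediately followed by
 * the nsems struct sem entries, which is why sem_base is simply set
 * to &sma[1]:
 *
 *	[ struct sem_array | sem[0] | sem[1] | ... | sem[nsems-1] ]
 */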

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task must sleep, otherwise an
 * error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
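
/*
 * Example of the all-or-nothing semantics (illustration only): with
 * sops[] = { {0, +1, 0}, {1, -1, 0} } and semval[1] == 0, the first
 * loop applies the +1 to semaphore 0, the -1 on semaphore 1 would
 * block, and the undo loop then rolls the +1 back before 1 (or
 * -EAGAIN under IPC_NOWAIT) is returned. Callers hold the relevant
 * locks, so the intermediate state is never visible.
 */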

/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head of the pending wake-ups, filled by this function
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
 *
 * Prepare the wake-up of the queue entry q.
 */
static void wake_up_sem_queue_prepare(struct list_head *pt,
				struct sem_queue *q, int error)
{
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and have the
		 * wakee busy-wait until we're scheduled back on.
		 */
		preempt_disable();
	}
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
}

/**
 * wake_up_sem_queue_do(pt) - do the actual wake-up
 * @pt: list of tasks to be woken up
 *
 * Do the actual wake-up.
 * The function is called without any locks held, thus the semaphore array
 * could be destroyed already and the tasks can disappear as soon as the
 * status is set to the actual return code.
 */
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
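
/*
 * Typical usage of the two helpers above (sketch of the pattern used
 * throughout this file, e.g. in semctl_setval()):
 *
 *	LIST_HEAD(tasks);
 *
 *	sem_lock(sma, NULL, -1);
 *	... do_smart_update()/update_queue() fill @tasks via
 *	    wake_up_sem_queue_prepare() ...
 *	sem_unlock(sma, -1);
 *	rcu_read_unlock();
 *	wake_up_sem_queue_do(&tasks);
 */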

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}

/** check_restart(sma, q)
 * @sma: semaphore array
 * @q: the operation that just completed
 *
 * update_queue is O(N^2) when it restarts scanning the whole queue of
 * waiting operations. Therefore this function checks if the restart is
 * really necessary. It is called after a previously waiting operation
 * was completed.
 */
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
	struct sem *curr;
	struct sem_queue *h;

	/* if the operation didn't modify the array, then no restart */
	if (q->alter == 0)
		return 0;

	/* pending complex operations are too difficult to analyse */
	if (sma->complex_count)
		return 1;

	/* we were a sleeping complex operation. Too difficult */
	if (q->nsops > 1)
		return 1;

	curr = sma->sem_base + q->sops[0].sem_num;

	/* No-one waits on this queue */
	if (list_empty(&curr->sem_pending))
		return 0;

	/* the new semaphore value */
	if (curr->semval) {
		/* It is impossible that someone waits for the new value:
		 * - q is a previously sleeping simple operation that
		 *   altered the array. It must be a decrement, because
		 *   simple increments never sleep.
		 * - The value is not 0, thus wait-for-zero won't proceed.
		 * - If there are older (higher priority) decrements
		 *   in the queue, then they have observed the original
		 *   semval value and couldn't proceed. The operation
		 *   decremented the value - thus they won't proceed either.
		 */
		BUG_ON(q->sops[0].sem_op >= 0);
		return 0;
	}
	/*
	 * semval is 0. Check if there are wait-for-zero semops.
	 * They must be the first entries in the per-semaphore queue
	 */
	h = list_first_entry(&curr->sem_pending, struct sem_queue, list);
	BUG_ON(h->nsops != 1);
	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);

	/* Yes, there is a wait-for-zero semop. Restart */
	if (h->sops[0].sem_op == 0)
		return 1;

	/* Again - no-one is waiting for the new value. */
	return 0;
}


/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @pt. The return code
 * is stored in q->pid.
 * The function returns 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->sem_pending;
	else
		pending_list = &sma->sem_base[semnum].sem_pending;

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, restart;

		q = container_of(walk, struct sem_queue, list);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(pt, q, error);
		if (restart)
			goto again;
	}
	return semop_completed;
}

/**
 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed
 * @nsops: number of operations
 * @otime: force setting otime
 * @pt: list head of the tasks that must be woken up.
 *
 * do_smart_update() makes the required calls to update_queue, based on the
 * actual changes that were performed on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_sem_queue_do(@pt).
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			int otime, struct list_head *pt)
{
	int i;
	int progress;

	progress = 1;
retry_global:
	if (sma->complex_count) {
		if (update_queue(sma, -1, pt)) {
			progress = 1;
			otime = 1;
			sops = NULL;
		}
	}
	if (!progress)
		goto done;

	if (!sops) {
		/* No semops; something special is going on. */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (update_queue(sma, i, pt)) {
				otime = 1;
				progress = 1;
			}
		}
		goto done_checkretry;
	}

	/* Check the semaphores that were modified. */
	for (i = 0; i < nsops; i++) {
		if (sops[i].sem_op > 0 ||
			(sops[i].sem_op < 0 &&
				sma->sem_base[sops[i].sem_num].semval == 0))
			if (update_queue(sma, sops[i].sem_num, pt)) {
				otime = 1;
				progress = 1;
			}
	}
done_checkretry:
	if (progress) {
		progress = 0;
		goto retry_global;
	}
done:
	if (otime)
		sma->sem_otime = get_seconds();
}


/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 * For example, a task sleeping on {sem 0: -1, sem 1: wait-for-zero} is
 * counted both in semncnt of semaphore 0 and in semzcnt of semaphore 1.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
		struct sembuf * sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
			semncnt++;
	}

	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
		struct sembuf * sops = q->sops;
		BUG_ON(sops->sem_num != semnum);
		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
			semzcnt++;
	}

	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	struct list_head tasks;
	int i;

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	INIT_LIST_HEAD(&tasks);
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = sma->sem_base + i;
		list_for_each_entry_safe(q, tq, &sem->sem_pending, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
		}
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_sem_queue_do(&tasks);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(sma, sem_rcu_free);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, void __user *p)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id = 0;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == SEM_STAT) {
			sma = sem_obtain_object(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
			id = sma->sem_perm.id;
		} else {
			sma = sem_obtain_object_check(ns, semid);
			if (IS_ERR(sma)) {
				err = PTR_ERR(sma);
				goto out_unlock;
			}
		}

		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		rcu_read_unlock();
		if (copy_semid_to_user(p, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}

static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		unsigned long arg)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem* curr;
	int err;
	struct list_head tasks;
	int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
	/* big-endian 64bit */
	val = arg >> 32;
#else
	/* 32bit or little-endian 64bit */
	val = arg;
#endif

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(sma, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	curr = &sma->sem_base[semnum];

	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	curr->sempid = task_tgid_vnr(current);
	sma->sem_ctime = get_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
	return 0;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem* curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_rcu_wakeup;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if(nsems > SEMMSL_FAST) {
			if (!ipc_rcu_getref(sma)) {
				sem_unlock(sma, -1);
				rcu_read_unlock();
				err = -EIDRM;
				goto out_free;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma, -1);
				rcu_read_unlock();
				err = -EIDRM;
				goto out_free;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		if (!ipc_rcu_getref(sma)) {
			rcu_read_unlock();
			return -EIDRM;
		}
		rcu_read_unlock();

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(sma, ipc_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(sma, ipc_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &tasks);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, void __user *p)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
				      &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err) {
		rcu_read_unlock();
		goto out_up;
	}

	switch(cmd){
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64.sem_perm, ipcp);
		if (err)
			goto out_unlock;
		sma->sem_ctime = get_seconds();
		break;
	default:
		rcu_read_unlock();
		err = -EINVAL;
		goto out_up;
	}

out_unlock:
	sem_unlock(sma, -1);
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	int version;
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		return semctl_nolock(ns, semid, cmd, version, p);
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_RMID:
	case IPC_SET:
		return semctl_down(ns, semid, cmd, version, p);
	default:
		return -EINVAL;
	}
}
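
/*
 * Illustrative user-space semctl usage (documentation only; SUSv3
 * requires the caller to define union semun itself):
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	union semun arg = { .val = 3 };
 *	semctl(semid, 0, SETVAL, arg);
 *	int v = semctl(semid, 0, GETVAL);
 */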

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

/* Look up the undo structure for @semid and, if found, move it to the
 * head of ulp->list_proc so that repeated lookups of the same id hit
 * the first list entry.
 */
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs a rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(sma)) {
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_rcu_putref(sma, ipc_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on semaphore array */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	sem_unlock(sma, -1);
out:
	return un;
}


/**
 * get_queue_result - Retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
	int error;

	error = q->status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}

	return error;
}


SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max, locknum;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;
	struct list_head tasks;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	INIT_LIST_HEAD(&tasks);

	if (undos) {
		/* On success, find_alloc_undo takes the rcu_read_lock */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_rcu_wakeup;

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_rcu_wakeup;

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence of
	 * "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);
	if (un && un->semid == -1)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			do_smart_update(sma, sops, nsops, 1, &tasks);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.list, &curr->sem_pending);
		else
			list_add(&queue.list, &curr->sem_pending);
	} else {
		if (alter)
			list_add_tail(&queue.list, &sma->sem_pending);
		else
			list_add(&queue.list, &sma->sem_pending);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;

sleep_again:
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma, locknum);
	rcu_read_unlock();

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = get_queue_result(&queue);

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources.
		 * Perform a smp_mb(): User space could assume that semop()
		 * is a memory barrier: Without the mb(), the cpu could
		 * speculatively read in user space stale data that was
		 * overwritten by the previous owner of the semaphore.
		 */
		smp_mb();

		goto out_free;
	}

	rcu_read_lock();
	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

	/*
	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
	 */
	error = get_queue_result(&queue);

	/*
	 * Array removed? If yes, leave without sem_unlock().
	 */
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process.
	 * Leave without unlink_queue(), but with sem_unlock().
	 */
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;

	/*
	 * If the wakeup was spurious, just retry
	 */
	if (error == -EINTR && !signal_pending(current))
		goto sleep_again;

	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma, locknum);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}
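
/*
 * Illustration (an assumption about user-space behavior, documentation
 * only): glibc's NPTL passes CLONE_SYSVSEM from pthread_create(), so all
 * threads of a process share one undo_list and their SEM_UNDO
 * adjustments accumulate in the same semadj arrays; fork() does not
 * pass the flag, so the child starts without an undo_list of its own.
 */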

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		struct list_head tasks;
		int semid, i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;

		if (semid == -1) {
			rcu_read_unlock();
			break;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by sus:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		/* maybe some queued-up processes were waiting for this */
		INIT_LIST_HEAD(&tasks);
		do_smart_update(sma, NULL, 0, 1, &tasks);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_sem_queue_do(&tasks);

		kfree_rcu(un, rcu);
	}
	kfree(ulp);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  from_kuid_munged(user_ns, sma->sem_perm.uid),
			  from_kgid_munged(user_ns, sma->sem_perm.gid),
			  from_kuid_munged(user_ns, sma->sem_perm.cuid),
			  from_kgid_munged(user_ns, sma->sem_perm.cgid),
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif