1 /*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/export.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62 #include <linux/ptrace.h>
63 #include <linux/sched/rt.h>
64 #include <linux/hugetlb.h>
65 #include <linux/freezer.h>
66 #include <linux/bootmem.h>
67 #include <linux/fault-inject.h>
68
69 #include <asm/futex.h>
70
71 #include "locking/rtmutex_common.h"
72
73 /*
74 * READ this before attempting to hack on futexes!
75 *
76 * Basic futex operation and ordering guarantees
77 * =============================================
78 *
79 * The waiter reads the futex value in user space and calls
80 * futex_wait(). This function computes the hash bucket and acquires
81 * the hash bucket lock. After that it reads the futex user space value
82 * again and verifies that the data has not changed. If it has not changed
83 * it enqueues itself into the hash bucket, releases the hash bucket lock
84 * and schedules.
85 *
86 * The waker side modifies the user space value of the futex and calls
87 * futex_wake(). This function computes the hash bucket and acquires the
88 * hash bucket lock. Then it looks for waiters on that futex in the hash
89 * bucket and wakes them.
90 *
91 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 92 * the hb spinlock can be avoided and the syscall can simply return. For this
 93 * optimization to work, ordering guarantees must exist so that the waiter
94 * being added to the list is acknowledged when the list is concurrently being
95 * checked by the waker, avoiding scenarios like the following:
96 *
97 * CPU 0 CPU 1
98 * val = *futex;
99 * sys_futex(WAIT, futex, val);
100 * futex_wait(futex, val);
101 * uval = *futex;
102 * *futex = newval;
103 * sys_futex(WAKE, futex);
104 * futex_wake(futex);
105 * if (queue_empty())
106 * return;
107 * if (uval == val)
108 * lock(hash_bucket(futex));
109 * queue();
110 * unlock(hash_bucket(futex));
111 * schedule();
112 *
113 * This would cause the waiter on CPU 0 to wait forever because it
114 * missed the transition of the user space value from val to newval
115 * and the waker did not find the waiter in the hash bucket queue.
116 *
117 * The correct serialization ensures that a waiter either observes
118 * the changed user space value before blocking or is woken by a
119 * concurrent waker:
120 *
121 * CPU 0 CPU 1
122 * val = *futex;
123 * sys_futex(WAIT, futex, val);
124 * futex_wait(futex, val);
125 *
126 * waiters++; (a)
127 * smp_mb(); (A) <-- paired with -.
128 * |
129 * lock(hash_bucket(futex)); |
130 * |
131 * uval = *futex; |
132 * | *futex = newval;
133 * | sys_futex(WAKE, futex);
134 * | futex_wake(futex);
135 * |
136 * `--------> smp_mb(); (B)
137 * if (uval == val)
138 * queue();
139 * unlock(hash_bucket(futex));
140 * schedule(); if (waiters)
141 * lock(hash_bucket(futex));
142 * else wake_waiters(futex);
143 * waiters--; (b) unlock(hash_bucket(futex));
144 *
145 * Where (A) orders the waiters increment and the futex value read through
146 * atomic operations (see hb_waiters_inc) and where (B) orders the write
147 * to futex and the waiters read -- this is done by the barriers for both
148 * shared and private futexes in get_futex_key_refs().
149 *
150 * This yields the following case (where X:=waiters, Y:=futex):
151 *
152 * X = Y = 0
153 *
154 * w[X]=1 w[Y]=1
155 * MB MB
156 * r[Y]=y r[X]=x
157 *
158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
159 * the guarantee that we cannot both miss the futex variable change and the
160 * enqueue.
161 *
 162 * Note that a new waiter is accounted for in (a) even when the wait call
 163 * may later return an error, in which case we backtrack from it in (b).
164 * Refer to the comment in queue_lock().
165 *
 166 * Similarly, in order to account for waiters being requeued to another
 167 * address we always increment the waiters for the destination bucket before
 168 * acquiring the lock. We then decrement them again after releasing it -
169 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
170 * will do the additional required waiter count housekeeping. This is done for
171 * double_lock_hb() and double_unlock_hb(), respectively.
172 */
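/*
 * To make the protocol above concrete, here is a minimal user space
 * sketch of the waiter/waker pairing (illustrative only, not part of
 * the kernel; error handling omitted):
 *
 *	#include <limits.h>
 *	#include <stdatomic.h>
 *	#include <unistd.h>
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *
 *	static atomic_int futex_word;
 *
 *	static void wait_on(int val)
 *	{
 *		// Sleeps only while futex_word still contains val; the
 *		// kernel re-checks the value under the hash bucket lock.
 *		while (atomic_load(&futex_word) == val)
 *			syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *				val, NULL, NULL, 0);
 *	}
 *
 *	static void wake_all(int newval)
 *	{
 *		// The store must be visible before the waiters field is
 *		// read; this is the pairing with barrier (B) above.
 *		atomic_store(&futex_word, newval);
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE,
 *			INT_MAX, NULL, NULL, 0);
 *	}
 */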
173
174 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
175 int __read_mostly futex_cmpxchg_enabled;
176 #endif
177
178 /*
179 * Futex flags used to encode options to functions and preserve them across
180 * restarts.
181 */
182 #ifdef CONFIG_MMU
183 # define FLAGS_SHARED 0x01
184 #else
185 /*
 186 * NOMMU does not have a per-process address space. Let the compiler
 187 * optimize the code away.
188 */
189 # define FLAGS_SHARED 0x00
190 #endif
191 #define FLAGS_CLOCKRT 0x02
192 #define FLAGS_HAS_TIMEOUT 0x04
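/*
 * These flags are derived from the futex op word at syscall entry. A
 * rough sketch of the translation done by the syscall code (which is
 * outside this section):
 *
 *	unsigned int flags = 0;
 *
 *	if (!(op & FUTEX_PRIVATE_FLAG))
 *		flags |= FLAGS_SHARED;
 *	if (op & FUTEX_CLOCK_REALTIME)
 *		flags |= FLAGS_CLOCKRT;
 */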
193
194 /*
195 * Priority Inheritance state:
196 */
197 struct futex_pi_state {
198 /*
199 * list of 'owned' pi_state instances - these have to be
200 * cleaned up in do_exit() if the task exits prematurely:
201 */
202 struct list_head list;
203
204 /*
205 * The PI object:
206 */
207 struct rt_mutex pi_mutex;
208
209 struct task_struct *owner;
210 atomic_t refcount;
211
212 union futex_key key;
213 };
214
215 /**
216 * struct futex_q - The hashed futex queue entry, one per waiting task
217 * @list: priority-sorted list of tasks waiting on this futex
218 * @task: the task waiting on the futex
219 * @lock_ptr: the hash bucket lock
220 * @key: the key the futex is hashed on
221 * @pi_state: optional priority inheritance state
222 * @rt_waiter: rt_waiter storage for use with requeue_pi
223 * @requeue_pi_key: the requeue_pi target futex key
224 * @bitset: bitset for the optional bitmasked wakeup
225 *
226 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
227 * we can wake only the relevant ones (hashed queues may be shared).
228 *
229 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
230 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
231 * The order of wakeup is always to make the first condition true, then
232 * the second.
233 *
234 * PI futexes are typically woken before they are removed from the hash list via
235 * the rt_mutex code. See unqueue_me_pi().
236 */
237 struct futex_q {
238 struct plist_node list;
239
240 struct task_struct *task;
241 spinlock_t *lock_ptr;
242 union futex_key key;
243 struct futex_pi_state *pi_state;
244 struct rt_mutex_waiter *rt_waiter;
245 union futex_key *requeue_pi_key;
246 u32 bitset;
247 };
248
249 static const struct futex_q futex_q_init = {
 250 /* list gets initialized in queue_me() */
251 .key = FUTEX_KEY_INIT,
252 .bitset = FUTEX_BITSET_MATCH_ANY
253 };
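/*
 * A rough sketch of how a waiter consumes the woken protocol described
 * above (the real code is in unqueue_me(), later in this file). The
 * re-check of q->lock_ptr guards against a waker NULLing it between
 * the read and the spin_lock():
 *
 *	spinlock_t *lock_ptr = READ_ONCE(q->lock_ptr);
 *
 *	if (lock_ptr) {
 *		spin_lock(lock_ptr);
 *		if (unlikely(lock_ptr != q->lock_ptr)) {
 *			spin_unlock(lock_ptr);
 *			// raced with a wakeup, re-read q->lock_ptr
 *		}
 *		...
 *	}
 */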
254
255 /*
256 * Hash buckets are shared by all the futex_keys that hash to the same
257 * location. Each key may have multiple futex_q structures, one for each task
258 * waiting on a futex.
259 */
260 struct futex_hash_bucket {
261 atomic_t waiters;
262 spinlock_t lock;
263 struct plist_head chain;
264 } ____cacheline_aligned_in_smp;
265
266 /*
267 * The base of the bucket array and its size are always used together
268 * (after initialization only in hash_futex()), so ensure that they
269 * reside in the same cacheline.
270 */
271 static struct {
272 struct futex_hash_bucket *queues;
273 unsigned long hashsize;
274 } __futex_data __read_mostly __aligned(2*sizeof(long));
275 #define futex_queues (__futex_data.queues)
276 #define futex_hashsize (__futex_data.hashsize)
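/*
 * The table itself is sized at boot in futex_init() (not shown in this
 * section) to a power of two scaled by the CPU count, which is what
 * lets hash_futex() below mask instead of divide; roughly:
 *
 *	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
 */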
277
278
279 /*
280 * Fault injections for futexes.
281 */
282 #ifdef CONFIG_FAIL_FUTEX
283
284 static struct {
285 struct fault_attr attr;
286
287 bool ignore_private;
288 } fail_futex = {
289 .attr = FAULT_ATTR_INITIALIZER,
290 .ignore_private = false,
291 };
292
293 static int __init setup_fail_futex(char *str)
294 {
295 return setup_fault_attr(&fail_futex.attr, str);
296 }
297 __setup("fail_futex=", setup_fail_futex);
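/*
 * The boot parameter follows the common fault_attr format, e.g.:
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *
 * See Documentation/fault-injection/fault-injection.txt for details.
 */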
298
299 static bool should_fail_futex(bool fshared)
300 {
301 if (fail_futex.ignore_private && !fshared)
302 return false;
303
304 return should_fail(&fail_futex.attr, 1);
305 }
306
307 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
308
309 static int __init fail_futex_debugfs(void)
310 {
311 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
312 struct dentry *dir;
313
314 dir = fault_create_debugfs_attr("fail_futex", NULL,
315 &fail_futex.attr);
316 if (IS_ERR(dir))
317 return PTR_ERR(dir);
318
319 if (!debugfs_create_bool("ignore-private", mode, dir,
320 &fail_futex.ignore_private)) {
321 debugfs_remove_recursive(dir);
322 return -ENOMEM;
323 }
324
325 return 0;
326 }
327
328 late_initcall(fail_futex_debugfs);
329
330 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
331
332 #else
333 static inline bool should_fail_futex(bool fshared)
334 {
335 return false;
336 }
337 #endif /* CONFIG_FAIL_FUTEX */
338
339 static inline void futex_get_mm(union futex_key *key)
340 {
341 atomic_inc(&key->private.mm->mm_count);
342 /*
343 * Ensure futex_get_mm() implies a full barrier such that
344 * get_futex_key() implies a full barrier. This is relied upon
345 * as smp_mb(); (B), see the ordering comment above.
346 */
347 smp_mb__after_atomic();
348 }
349
350 /*
351 * Reflects a new waiter being added to the waitqueue.
352 */
353 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
354 {
355 #ifdef CONFIG_SMP
356 atomic_inc(&hb->waiters);
357 /*
358 * Full barrier (A), see the ordering comment above.
359 */
360 smp_mb__after_atomic();
361 #endif
362 }
363
364 /*
365 * Reflects a waiter being removed from the waitqueue by wakeup
366 * paths.
367 */
368 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
369 {
370 #ifdef CONFIG_SMP
371 atomic_dec(&hb->waiters);
372 #endif
373 }
374
375 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
376 {
377 #ifdef CONFIG_SMP
378 return atomic_read(&hb->waiters);
379 #else
380 return 1;
381 #endif
382 }
383
384 /**
385 * hash_futex - Return the hash bucket in the global hash
386 * @key: Pointer to the futex key for which the hash is calculated
387 *
388 * We hash on the keys returned from get_futex_key (see below) and return the
389 * corresponding hash bucket in the global hash.
390 */
391 static struct futex_hash_bucket *hash_futex(union futex_key *key)
392 {
393 u32 hash = jhash2((u32*)&key->both.word,
394 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
395 key->both.offset);
396 return &futex_queues[hash & (futex_hashsize - 1)];
397 }
398
399
400 /**
401 * match_futex - Check whether two futex keys are equal
402 * @key1: Pointer to key1
403 * @key2: Pointer to key2
404 *
405 * Return 1 if two futex_keys are equal, 0 otherwise.
406 */
407 static inline int match_futex(union futex_key *key1, union futex_key *key2)
408 {
409 return (key1 && key2
410 && key1->both.word == key2->both.word
411 && key1->both.ptr == key2->both.ptr
412 && key1->both.offset == key2->both.offset);
413 }
414
415 /*
416 * Take a reference to the resource addressed by a key.
417 * Can be called while holding spinlocks.
418 *
419 */
420 static void get_futex_key_refs(union futex_key *key)
421 {
422 if (!key->both.ptr)
423 return;
424
425 /*
 426 * On MMU-less systems futexes are always "private" as there is no
 427 * per-process address space. We need the full barrier nevertheless -
 428 * yes, arch/blackfin has MMU-less SMP ...
429 */
430 if (!IS_ENABLED(CONFIG_MMU)) {
431 smp_mb(); /* explicit smp_mb(); (B) */
432 return;
433 }
434
435 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
436 case FUT_OFF_INODE:
437 ihold(key->shared.inode); /* implies smp_mb(); (B) */
438 break;
439 case FUT_OFF_MMSHARED:
440 futex_get_mm(key); /* implies smp_mb(); (B) */
441 break;
442 default:
443 /*
 444 * Private futexes do not hold a reference on an inode or
 445 * mm, therefore the only purpose of calling get_futex_key_refs()
 446 * is the barrier it provides for the lockless waiter check.
447 */
448 smp_mb(); /* explicit smp_mb(); (B) */
449 }
450 }
451
452 /*
453 * Drop a reference to the resource addressed by a key.
454 * The hash bucket spinlock must not be held. This is
455 * a no-op for private futexes, see comment in the get
456 * counterpart.
457 */
458 static void drop_futex_key_refs(union futex_key *key)
459 {
460 if (!key->both.ptr) {
461 /* If we're here then we tried to put a key we failed to get */
462 WARN_ON_ONCE(1);
463 return;
464 }
465
466 if (!IS_ENABLED(CONFIG_MMU))
467 return;
468
469 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
470 case FUT_OFF_INODE:
471 iput(key->shared.inode);
472 break;
473 case FUT_OFF_MMSHARED:
474 mmdrop(key->private.mm);
475 break;
476 }
477 }
478
479 /**
480 * get_futex_key() - Get parameters which are the keys for a futex
481 * @uaddr: virtual address of the futex
482 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
483 * @key: address where result is stored.
484 * @rw: mapping needs to be read/write (values: VERIFY_READ,
485 * VERIFY_WRITE)
486 *
487 * Return: a negative error code or 0
488 *
489 * The key words are stored in *key on success.
490 *
491 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
492 * offset_within_page). For private mappings, it's (uaddr, current->mm).
493 * We can usually work out the index without swapping in the page.
494 *
495 * lock_page() might sleep, the caller should not hold a spinlock.
496 */
497 static int
498 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
499 {
500 unsigned long address = (unsigned long)uaddr;
501 struct mm_struct *mm = current->mm;
502 struct page *page, *tail;
503 struct address_space *mapping;
504 int err, ro = 0;
505
506 /*
507 * The futex address must be "naturally" aligned.
508 */
509 key->both.offset = address % PAGE_SIZE;
510 if (unlikely((address % sizeof(u32)) != 0))
511 return -EINVAL;
512 address -= key->both.offset;
513
514 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
515 return -EFAULT;
516
517 if (unlikely(should_fail_futex(fshared)))
518 return -EFAULT;
519
520 /*
521 * PROCESS_PRIVATE futexes are fast.
 522 * As the mm cannot disappear under us and the 'key' only needs the
 523 * virtual address, we don't even have to find the underlying vma.
 524 * Note: we do have to check that 'uaddr' is a valid user address,
 525 * but access_ok() should be faster than find_vma().
526 */
527 if (!fshared) {
528 key->private.mm = mm;
529 key->private.address = address;
530 get_futex_key_refs(key); /* implies smp_mb(); (B) */
531 return 0;
532 }
533
534 again:
535 /* Ignore any VERIFY_READ mapping (futex common case) */
536 if (unlikely(should_fail_futex(fshared)))
537 return -EFAULT;
538
539 err = get_user_pages_fast(address, 1, 1, &page);
540 /*
541 * If write access is not required (eg. FUTEX_WAIT), try
542 * and get read-only access.
543 */
544 if (err == -EFAULT && rw == VERIFY_READ) {
545 err = get_user_pages_fast(address, 1, 0, &page);
546 ro = 1;
547 }
548 if (err < 0)
549 return err;
550 else
551 err = 0;
552
553 /*
554 * The treatment of mapping from this point on is critical. The page
555 * lock protects many things but in this context the page lock
556 * stabilizes mapping, prevents inode freeing in the shared
557 * file-backed region case and guards against movement to swap cache.
558 *
559 * Strictly speaking the page lock is not needed in all cases being
 560 * considered here and the page lock forces unnecessary serialization.
 561 * From this point on, mapping will be re-verified if necessary and
 562 * the page lock will be acquired only if it is unavoidable.
563 *
564 * Mapping checks require the head page for any compound page so the
565 * head page and mapping is looked up now. For anonymous pages, it
566 * does not matter if the page splits in the future as the key is
567 * based on the address. For filesystem-backed pages, the tail is
568 * required as the index of the page determines the key. For
569 * base pages, there is no tail page and tail == page.
570 */
571 tail = page;
572 page = compound_head(page);
573 mapping = READ_ONCE(page->mapping);
574
575 /*
576 * If page->mapping is NULL, then it cannot be a PageAnon
577 * page; but it might be the ZERO_PAGE or in the gate area or
578 * in a special mapping (all cases which we are happy to fail);
579 * or it may have been a good file page when get_user_pages_fast
580 * found it, but truncated or holepunched or subjected to
581 * invalidate_complete_page2 before we got the page lock (also
582 * cases which we are happy to fail). And we hold a reference,
583 * so refcount care in invalidate_complete_page's remove_mapping
584 * prevents drop_caches from setting mapping to NULL beneath us.
585 *
586 * The case we do have to guard against is when memory pressure made
587 * shmem_writepage move it from filecache to swapcache beneath us:
588 * an unlikely race, but we do need to retry for page->mapping.
589 */
590 if (unlikely(!mapping)) {
591 int shmem_swizzled;
592
593 /*
594 * Page lock is required to identify which special case above
595 * applies. If this is really a shmem page then the page lock
596 * will prevent unexpected transitions.
597 */
598 lock_page(page);
599 shmem_swizzled = PageSwapCache(page) || page->mapping;
600 unlock_page(page);
601 put_page(page);
602
603 if (shmem_swizzled)
604 goto again;
605
606 return -EFAULT;
607 }
608
609 /*
610 * Private mappings are handled in a simple way.
611 *
612 * If the futex key is stored on an anonymous page, then the associated
613 * object is the mm which is implicitly pinned by the calling process.
614 *
615 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
616 * it's a read-only handle, it's expected that futexes attach to
617 * the object not the particular process.
618 */
619 if (PageAnon(page)) {
620 /*
621 * A RO anonymous page will never change and thus doesn't make
622 * sense for futex operations.
623 */
624 if (unlikely(should_fail_futex(fshared)) || ro) {
625 err = -EFAULT;
626 goto out;
627 }
628
629 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
630 key->private.mm = mm;
631 key->private.address = address;
632
633 get_futex_key_refs(key); /* implies smp_mb(); (B) */
634
635 } else {
636 struct inode *inode;
637
638 /*
639 * The associated futex object in this case is the inode and
640 * the page->mapping must be traversed. Ordinarily this should
641 * be stabilised under page lock but it's not strictly
642 * necessary in this case as we just want to pin the inode, not
643 * update the radix tree or anything like that.
644 *
645 * The RCU read lock is taken as the inode is finally freed
646 * under RCU. If the mapping still matches expectations then the
647 * mapping->host can be safely accessed as being a valid inode.
648 */
649 rcu_read_lock();
650
651 if (READ_ONCE(page->mapping) != mapping) {
652 rcu_read_unlock();
653 put_page(page);
654
655 goto again;
656 }
657
658 inode = READ_ONCE(mapping->host);
659 if (!inode) {
660 rcu_read_unlock();
661 put_page(page);
662
663 goto again;
664 }
665
666 /*
667 * Take a reference unless it is about to be freed. Previously
668 * this reference was taken by ihold under the page lock
669 * pinning the inode in place so i_lock was unnecessary. The
670 * only way for this check to fail is if the inode was
671 * truncated in parallel which is almost certainly an
672 * application bug. In such a case, just retry.
673 *
674 * We are not calling into get_futex_key_refs() in file-backed
675 * cases, therefore a successful atomic_inc return below will
676 * guarantee that get_futex_key() will still imply smp_mb(); (B).
677 */
678 if (!atomic_inc_not_zero(&inode->i_count)) {
679 rcu_read_unlock();
680 put_page(page);
681
682 goto again;
683 }
684
 685 /* Should be impossible but let's be paranoid for now */
686 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
687 err = -EFAULT;
688 rcu_read_unlock();
689 iput(inode);
690
691 goto out;
692 }
693
694 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
695 key->shared.inode = inode;
696 key->shared.pgoff = basepage_index(tail);
697 rcu_read_unlock();
698 }
699
700 out:
701 put_page(page);
702 return err;
703 }
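/*
 * To summarize the cases above, a successful get_futex_key() leaves
 * *key in one of three forms:
 *
 *	private:	( current->mm, uaddr, offset )
 *	shared anon:	( current->mm, uaddr, offset | FUT_OFF_MMSHARED )
 *	shared file:	( inode, pgoff, offset | FUT_OFF_INODE )
 */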
704
705 static inline void put_futex_key(union futex_key *key)
706 {
707 drop_futex_key_refs(key);
708 }
709
710 /**
711 * fault_in_user_writeable() - Fault in user address and verify RW access
712 * @uaddr: pointer to faulting user space address
713 *
714 * Slow path to fixup the fault we just took in the atomic write
715 * access to @uaddr.
716 *
717 * We have no generic implementation of a non-destructive write to the
718 * user address. We know that we faulted in the atomic pagefault
 719 * disabled section so we might as well avoid the #PF overhead by
 720 * calling fixup_user_fault() right away.
721 */
722 static int fault_in_user_writeable(u32 __user *uaddr)
723 {
724 struct mm_struct *mm = current->mm;
725 int ret;
726
727 down_read(&mm->mmap_sem);
728 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
729 FAULT_FLAG_WRITE, NULL);
730 up_read(&mm->mmap_sem);
731
732 return ret < 0 ? ret : 0;
733 }
734
735 /**
736 * futex_top_waiter() - Return the highest priority waiter on a futex
737 * @hb: the hash bucket the futex_q's reside in
738 * @key: the futex key (to distinguish it from other futex futex_q's)
739 *
740 * Must be called with the hb lock held.
741 */
742 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
743 union futex_key *key)
744 {
745 struct futex_q *this;
746
747 plist_for_each_entry(this, &hb->chain, list) {
748 if (match_futex(&this->key, key))
749 return this;
750 }
751 return NULL;
752 }
753
754 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
755 u32 uval, u32 newval)
756 {
757 int ret;
758
759 pagefault_disable();
760 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
761 pagefault_enable();
762
763 return ret;
764 }
765
766 static int get_futex_value_locked(u32 *dest, u32 __user *from)
767 {
768 int ret;
769
770 pagefault_disable();
771 ret = __get_user(*dest, from);
772 pagefault_enable();
773
774 return ret ? -EFAULT : 0;
775 }
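/*
 * Both helpers above run with pagefaults disabled, so they can be
 * called while holding hb->lock: they fail instead of faulting.
 * Callers then follow the usual drop-locks-and-retry pattern, roughly
 * (sketch; see e.g. futex_requeue() below):
 *
 *	ret = get_futex_value_locked(&curval, uaddr);
 *	if (ret) {
 *		spin_unlock(&hb->lock);		// drop the bucket lock
 *		ret = get_user(curval, uaddr);	// fault the page in
 *		if (ret)
 *			goto out;
 *		goto retry;			// and start over
 *	}
 */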
776
777
778 /*
779 * PI code:
780 */
781 static int refill_pi_state_cache(void)
782 {
783 struct futex_pi_state *pi_state;
784
785 if (likely(current->pi_state_cache))
786 return 0;
787
788 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
789
790 if (!pi_state)
791 return -ENOMEM;
792
793 INIT_LIST_HEAD(&pi_state->list);
794 /* pi_mutex gets initialized later */
795 pi_state->owner = NULL;
796 atomic_set(&pi_state->refcount, 1);
797 pi_state->key = FUTEX_KEY_INIT;
798
799 current->pi_state_cache = pi_state;
800
801 return 0;
802 }
803
804 static struct futex_pi_state * alloc_pi_state(void)
805 {
806 struct futex_pi_state *pi_state = current->pi_state_cache;
807
808 WARN_ON(!pi_state);
809 current->pi_state_cache = NULL;
810
811 return pi_state;
812 }
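/*
 * The one-slot cache lets callers preallocate with GFP_KERNEL while no
 * locks are held and consume the slot later in atomic context, e.g. as
 * futex_requeue() below does:
 *
 *	if (refill_pi_state_cache())	// may sleep
 *		return -ENOMEM;
 *	...
 *	spin_lock(&hb->lock);
 *	...
 *	pi_state = alloc_pi_state();	// cannot fail now
 */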
813
814 /*
815 * Drops a reference to the pi_state object and frees or caches it
816 * when the last reference is gone.
817 *
818 * Must be called with the hb lock held.
819 */
820 static void put_pi_state(struct futex_pi_state *pi_state)
821 {
822 if (!pi_state)
823 return;
824
825 if (!atomic_dec_and_test(&pi_state->refcount))
826 return;
827
828 /*
829 * If pi_state->owner is NULL, the owner is most probably dying
830 * and has cleaned up the pi_state already
831 */
832 if (pi_state->owner) {
833 raw_spin_lock_irq(&pi_state->owner->pi_lock);
834 list_del_init(&pi_state->list);
835 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
836
837 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
838 }
839
840 if (current->pi_state_cache)
841 kfree(pi_state);
842 else {
843 /*
844 * pi_state->list is already empty.
845 * clear pi_state->owner.
846 * refcount is at 0 - put it back to 1.
847 */
848 pi_state->owner = NULL;
849 atomic_set(&pi_state->refcount, 1);
850 current->pi_state_cache = pi_state;
851 }
852 }
853
854 /*
855 * Look up the task based on what TID userspace gave us.
 856 * We don't trust it.
857 */
858 static struct task_struct * futex_find_get_task(pid_t pid)
859 {
860 struct task_struct *p;
861
862 rcu_read_lock();
863 p = find_task_by_vpid(pid);
864 if (p)
865 get_task_struct(p);
866
867 rcu_read_unlock();
868
869 return p;
870 }
871
872 /*
873 * This task is holding PI mutexes at exit time => bad.
874 * Kernel cleans up PI-state, but userspace is likely hosed.
875 * (Robust-futex cleanup is separate and might save the day for userspace.)
876 */
877 void exit_pi_state_list(struct task_struct *curr)
878 {
879 struct list_head *next, *head = &curr->pi_state_list;
880 struct futex_pi_state *pi_state;
881 struct futex_hash_bucket *hb;
882 union futex_key key = FUTEX_KEY_INIT;
883
884 if (!futex_cmpxchg_enabled)
885 return;
886 /*
887 * We are a ZOMBIE and nobody can enqueue itself on
888 * pi_state_list anymore, but we have to be careful
889 * versus waiters unqueueing themselves:
890 */
891 raw_spin_lock_irq(&curr->pi_lock);
892 while (!list_empty(head)) {
893
894 next = head->next;
895 pi_state = list_entry(next, struct futex_pi_state, list);
896 key = pi_state->key;
897 hb = hash_futex(&key);
898 raw_spin_unlock_irq(&curr->pi_lock);
899
900 spin_lock(&hb->lock);
901
902 raw_spin_lock_irq(&curr->pi_lock);
903 /*
904 * We dropped the pi-lock, so re-check whether this
905 * task still owns the PI-state:
906 */
907 if (head->next != next) {
908 spin_unlock(&hb->lock);
909 continue;
910 }
911
912 WARN_ON(pi_state->owner != curr);
913 WARN_ON(list_empty(&pi_state->list));
914 list_del_init(&pi_state->list);
915 pi_state->owner = NULL;
916 raw_spin_unlock_irq(&curr->pi_lock);
917
918 rt_mutex_unlock(&pi_state->pi_mutex);
919
920 spin_unlock(&hb->lock);
921
922 raw_spin_lock_irq(&curr->pi_lock);
923 }
924 raw_spin_unlock_irq(&curr->pi_lock);
925 }
926
927 /*
928 * We need to check the following states:
929 *
930 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
931 *
932 * [1] NULL | --- | --- | 0 | 0/1 | Valid
933 * [2] NULL | --- | --- | >0 | 0/1 | Valid
934 *
935 * [3] Found | NULL | -- | Any | 0/1 | Invalid
936 *
937 * [4] Found | Found | NULL | 0 | 1 | Valid
938 * [5] Found | Found | NULL | >0 | 1 | Invalid
939 *
940 * [6] Found | Found | task | 0 | 1 | Valid
941 *
942 * [7] Found | Found | NULL | Any | 0 | Invalid
943 *
944 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
945 * [9] Found | Found | task | 0 | 0 | Invalid
946 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
947 *
 948 * [1] Indicates that the kernel can acquire the futex atomically. We
 949 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
950 *
951 * [2] Valid, if TID does not belong to a kernel thread. If no matching
952 * thread is found then it indicates that the owner TID has died.
953 *
954 * [3] Invalid. The waiter is queued on a non PI futex
955 *
956 * [4] Valid state after exit_robust_list(), which sets the user space
957 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
958 *
959 * [5] The user space value got manipulated between exit_robust_list()
960 * and exit_pi_state_list()
961 *
962 * [6] Valid state after exit_pi_state_list() which sets the new owner in
963 * the pi_state but cannot access the user space value.
964 *
965 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
966 *
967 * [8] Owner and user space value match
968 *
969 * [9] There is no transient state which sets the user space TID to 0
970 * except exit_robust_list(), but this is indicated by the
971 * FUTEX_OWNER_DIED bit. See [4]
972 *
973 * [10] There is no transient state which leaves owner and user space
974 * TID out of sync.
975 */
976
977 /*
978 * Validate that the existing waiter has a pi_state and sanity check
979 * the pi_state against the user space value. If correct, attach to
980 * it.
981 */
982 static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
983 struct futex_pi_state **ps)
984 {
985 pid_t pid = uval & FUTEX_TID_MASK;
986
987 /*
988 * Userspace might have messed up non-PI and PI futexes [3]
989 */
990 if (unlikely(!pi_state))
991 return -EINVAL;
992
993 WARN_ON(!atomic_read(&pi_state->refcount));
994
995 /*
996 * Handle the owner died case:
997 */
998 if (uval & FUTEX_OWNER_DIED) {
999 /*
1000 * exit_pi_state_list sets owner to NULL and wakes the
1001 * topmost waiter. The task which acquires the
1002 * pi_state->rt_mutex will fixup owner.
1003 */
1004 if (!pi_state->owner) {
1005 /*
1006 * No pi state owner, but the user space TID
1007 * is not 0. Inconsistent state. [5]
1008 */
1009 if (pid)
1010 return -EINVAL;
1011 /*
1012 * Take a ref on the state and return success. [4]
1013 */
1014 goto out_state;
1015 }
1016
1017 /*
1018 * If TID is 0, then either the dying owner has not
1019 * yet executed exit_pi_state_list() or some waiter
1020 * acquired the rtmutex in the pi state, but did not
1021 * yet fixup the TID in user space.
1022 *
1023 * Take a ref on the state and return success. [6]
1024 */
1025 if (!pid)
1026 goto out_state;
1027 } else {
1028 /*
1029 * If the owner died bit is not set, then the pi_state
1030 * must have an owner. [7]
1031 */
1032 if (!pi_state->owner)
1033 return -EINVAL;
1034 }
1035
1036 /*
1037 * Bail out if user space manipulated the futex value. If pi
1038 * state exists then the owner TID must be the same as the
1039 * user space TID. [9/10]
1040 */
1041 if (pid != task_pid_vnr(pi_state->owner))
1042 return -EINVAL;
1043 out_state:
1044 atomic_inc(&pi_state->refcount);
1045 *ps = pi_state;
1046 return 0;
1047 }
1048
1049 /*
1050 * Lookup the task for the TID provided from user space and attach to
1051 * it after doing proper sanity checks.
1052 */
1053 static int attach_to_pi_owner(u32 uval, union futex_key *key,
1054 struct futex_pi_state **ps)
1055 {
1056 pid_t pid = uval & FUTEX_TID_MASK;
1057 struct futex_pi_state *pi_state;
1058 struct task_struct *p;
1059
1060 /*
1061 * We are the first waiter - try to look up the real owner and attach
1062 * the new pi_state to it, but bail out when TID = 0 [1]
1063 */
1064 if (!pid)
1065 return -ESRCH;
1066 p = futex_find_get_task(pid);
1067 if (!p)
1068 return -ESRCH;
1069
1070 if (unlikely(p->flags & PF_KTHREAD)) {
1071 put_task_struct(p);
1072 return -EPERM;
1073 }
1074
1075 /*
 1076 * We need to look at the task state flags to figure out
 1077 * whether the task is exiting. To protect against the task
 1078 * flags being changed by do_exit(), we do this protected by
 1079 * p->pi_lock:
1080 */
1081 raw_spin_lock_irq(&p->pi_lock);
1082 if (unlikely(p->flags & PF_EXITING)) {
1083 /*
1084 * The task is on the way out. When PF_EXITPIDONE is
1085 * set, we know that the task has finished the
1086 * cleanup:
1087 */
1088 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1089
1090 raw_spin_unlock_irq(&p->pi_lock);
1091 put_task_struct(p);
1092 return ret;
1093 }
1094
1095 /*
1096 * No existing pi state. First waiter. [2]
1097 */
1098 pi_state = alloc_pi_state();
1099
1100 /*
1101 * Initialize the pi_mutex in locked state and make @p
1102 * the owner of it:
1103 */
1104 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1105
1106 /* Store the key for possible exit cleanups: */
1107 pi_state->key = *key;
1108
1109 WARN_ON(!list_empty(&pi_state->list));
1110 list_add(&pi_state->list, &p->pi_state_list);
1111 pi_state->owner = p;
1112 raw_spin_unlock_irq(&p->pi_lock);
1113
1114 put_task_struct(p);
1115
1116 *ps = pi_state;
1117
1118 return 0;
1119 }
1120
1121 static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
1122 union futex_key *key, struct futex_pi_state **ps)
1123 {
1124 struct futex_q *match = futex_top_waiter(hb, key);
1125
1126 /*
1127 * If there is a waiter on that futex, validate it and
1128 * attach to the pi_state when the validation succeeds.
1129 */
1130 if (match)
1131 return attach_to_pi_state(uval, match->pi_state, ps);
1132
1133 /*
1134 * We are the first waiter - try to look up the owner based on
1135 * @uval and attach to it.
1136 */
1137 return attach_to_pi_owner(uval, key, ps);
1138 }
1139
1140 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1141 {
1142 u32 uninitialized_var(curval);
1143
1144 if (unlikely(should_fail_futex(true)))
1145 return -EFAULT;
1146
1147 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1148 return -EFAULT;
1149
 1150 /* If user space value changed, let the caller retry */
1151 return curval != uval ? -EAGAIN : 0;
1152 }
1153
1154 /**
1155 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1156 * @uaddr: the pi futex user address
1157 * @hb: the pi futex hash bucket
1158 * @key: the futex key associated with uaddr and hb
1159 * @ps: the pi_state pointer where we store the result of the
1160 * lookup
1161 * @task: the task to perform the atomic lock work for. This will
1162 * be "current" except in the case of requeue pi.
1163 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1164 *
1165 * Return:
1166 * 0 - ready to wait;
1167 * 1 - acquired the lock;
1168 * <0 - error
1169 *
1170 * The hb->lock and futex_key refs shall be held by the caller.
1171 */
1172 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1173 union futex_key *key,
1174 struct futex_pi_state **ps,
1175 struct task_struct *task, int set_waiters)
1176 {
1177 u32 uval, newval, vpid = task_pid_vnr(task);
1178 struct futex_q *match;
1179 int ret;
1180
1181 /*
1182 * Read the user space value first so we can validate a few
1183 * things before proceeding further.
1184 */
1185 if (get_futex_value_locked(&uval, uaddr))
1186 return -EFAULT;
1187
1188 if (unlikely(should_fail_futex(true)))
1189 return -EFAULT;
1190
1191 /*
1192 * Detect deadlocks.
1193 */
1194 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1195 return -EDEADLK;
1196
1197 if ((unlikely(should_fail_futex(true))))
1198 return -EDEADLK;
1199
1200 /*
1201 * Lookup existing state first. If it exists, try to attach to
1202 * its pi_state.
1203 */
1204 match = futex_top_waiter(hb, key);
1205 if (match)
1206 return attach_to_pi_state(uval, match->pi_state, ps);
1207
1208 /*
 1209 * No waiter and user TID is 0. We are here because the
 1210 * waiters bit or the owner died bit is set, or we were
 1211 * called from requeue_cmp_pi, or for whatever other reason
 1212 * something took the syscall.
1213 */
1214 if (!(uval & FUTEX_TID_MASK)) {
1215 /*
1216 * We take over the futex. No other waiters and the user space
1217 * TID is 0. We preserve the owner died bit.
1218 */
1219 newval = uval & FUTEX_OWNER_DIED;
1220 newval |= vpid;
1221
1222 /* The futex requeue_pi code can enforce the waiters bit */
1223 if (set_waiters)
1224 newval |= FUTEX_WAITERS;
1225
1226 ret = lock_pi_update_atomic(uaddr, uval, newval);
1227 /* If the take over worked, return 1 */
1228 return ret < 0 ? ret : 1;
1229 }
1230
1231 /*
 1232 * First waiter. Set the waiters bit before attaching ourselves to
 1233 * the owner. If the owner tries to unlock, it will be forced into
 1234 * the kernel and blocked on hb->lock.
1235 */
1236 newval = uval | FUTEX_WAITERS;
1237 ret = lock_pi_update_atomic(uaddr, uval, newval);
1238 if (ret)
1239 return ret;
1240 /*
1241 * If the update of the user space value succeeded, we try to
1242 * attach to the owner. If that fails, no harm done, we only
1243 * set the FUTEX_WAITERS bit in the user space variable.
1244 */
1245 return attach_to_pi_owner(uval, key, ps);
1246 }
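/*
 * For reference, the user space half of the PI protocol that this
 * function completes looks roughly like this (illustrative sketch,
 * not part of the kernel; error handling omitted):
 *
 *	_Atomic uint32_t *futex = ...;
 *	uint32_t tid = syscall(SYS_gettid);
 *	uint32_t zero = 0;
 *
 *	// Fast path: an uncontended lock is a single cmpxchg 0 -> TID,
 *	// with no syscall. Only on contention do we enter the kernel,
 *	// which then sets FUTEX_WAITERS as done above.
 *	if (!atomic_compare_exchange_strong(futex, &zero, tid))
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */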
1247
1248 /**
1249 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1250 * @q: The futex_q to unqueue
1251 *
1252 * The q->lock_ptr must not be NULL and must be held by the caller.
1253 */
1254 static void __unqueue_futex(struct futex_q *q)
1255 {
1256 struct futex_hash_bucket *hb;
1257
1258 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1259 || WARN_ON(plist_node_empty(&q->list)))
1260 return;
1261
1262 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1263 plist_del(&q->list, &hb->chain);
1264 hb_waiters_dec(hb);
1265 }
1266
1267 /*
1268 * The hash bucket lock must be held when this is called.
1269 * Afterwards, the futex_q must not be accessed. Callers
1270 * must ensure to later call wake_up_q() for the actual
1271 * wakeups to occur.
1272 */
1273 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1274 {
1275 struct task_struct *p = q->task;
1276
1277 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1278 return;
1279
1280 /*
1281 * Queue the task for later wakeup for after we've released
1282 * the hb->lock. wake_q_add() grabs reference to p.
1283 */
1284 wake_q_add(wake_q, p);
1285 __unqueue_futex(q);
1286 /*
1287 * The waiting task can free the futex_q as soon as
1288 * q->lock_ptr = NULL is written, without taking any locks. A
1289 * memory barrier is required here to prevent the following
1290 * store to lock_ptr from getting ahead of the plist_del.
1291 */
1292 smp_wmb();
1293 q->lock_ptr = NULL;
1294 }
1295
1296 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1297 struct futex_hash_bucket *hb)
1298 {
1299 struct task_struct *new_owner;
1300 struct futex_pi_state *pi_state = this->pi_state;
1301 u32 uninitialized_var(curval), newval;
1302 WAKE_Q(wake_q);
1303 bool deboost;
1304 int ret = 0;
1305
1306 if (!pi_state)
1307 return -EINVAL;
1308
1309 /*
1310 * If current does not own the pi_state then the futex is
1311 * inconsistent and user space fiddled with the futex value.
1312 */
1313 if (pi_state->owner != current)
1314 return -EINVAL;
1315
1316 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1317 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1318
1319 /*
1320 * It is possible that the next waiter (the one that brought
1321 * this owner to the kernel) timed out and is no longer
1322 * waiting on the lock.
1323 */
1324 if (!new_owner)
1325 new_owner = this->task;
1326
1327 /*
1328 * We pass it to the next owner. The WAITERS bit is always
1329 * kept enabled while there is PI state around. We cleanup the
1330 * owner died bit, because we are the owner.
1331 */
1332 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1333
1334 if (unlikely(should_fail_futex(true)))
1335 ret = -EFAULT;
1336
1337 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1338 ret = -EFAULT;
1339 } else if (curval != uval) {
1340 /*
 1341 * If an unconditional UNLOCK_PI operation (user space did not
1342 * try the TID->0 transition) raced with a waiter setting the
1343 * FUTEX_WAITERS flag between get_user() and locking the hash
1344 * bucket lock, retry the operation.
1345 */
1346 if ((FUTEX_TID_MASK & curval) == uval)
1347 ret = -EAGAIN;
1348 else
1349 ret = -EINVAL;
1350 }
1351 if (ret) {
1352 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1353 return ret;
1354 }
1355
1356 raw_spin_lock(&pi_state->owner->pi_lock);
1357 WARN_ON(list_empty(&pi_state->list));
1358 list_del_init(&pi_state->list);
1359 raw_spin_unlock(&pi_state->owner->pi_lock);
1360
1361 raw_spin_lock(&new_owner->pi_lock);
1362 WARN_ON(!list_empty(&pi_state->list));
1363 list_add(&pi_state->list, &new_owner->pi_state_list);
1364 pi_state->owner = new_owner;
1365 raw_spin_unlock(&new_owner->pi_lock);
1366
1367 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1368
1369 deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1370
1371 /*
 1372 * First unlock HB so the waiter does not spin on it once it is woken
 1373 * up. Second, wake up the waiter before the priority is adjusted. If we
1374 * deboost first (and lose our higher priority), then the task might get
1375 * scheduled away before the wake up can take place.
1376 */
1377 spin_unlock(&hb->lock);
1378 wake_up_q(&wake_q);
1379 if (deboost)
1380 rt_mutex_adjust_prio(current);
1381
1382 return 0;
1383 }
1384
1385 /*
1386 * Express the locking dependencies for lockdep:
1387 */
1388 static inline void
1389 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1390 {
1391 if (hb1 <= hb2) {
1392 spin_lock(&hb1->lock);
1393 if (hb1 < hb2)
1394 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1395 } else { /* hb1 > hb2 */
1396 spin_lock(&hb2->lock);
1397 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1398 }
1399 }
1400
1401 static inline void
1402 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1403 {
1404 spin_unlock(&hb1->lock);
1405 if (hb1 != hb2)
1406 spin_unlock(&hb2->lock);
1407 }
1408
1409 /*
1410 * Wake up waiters matching bitset queued on this futex (uaddr).
1411 */
1412 static int
1413 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1414 {
1415 struct futex_hash_bucket *hb;
1416 struct futex_q *this, *next;
1417 union futex_key key = FUTEX_KEY_INIT;
1418 int ret;
1419 WAKE_Q(wake_q);
1420
1421 if (!bitset)
1422 return -EINVAL;
1423
1424 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
1425 if (unlikely(ret != 0))
1426 goto out;
1427
1428 hb = hash_futex(&key);
1429
1430 /* Make sure we really have tasks to wakeup */
1431 if (!hb_waiters_pending(hb))
1432 goto out_put_key;
1433
1434 spin_lock(&hb->lock);
1435
1436 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1437 if (match_futex (&this->key, &key)) {
1438 if (this->pi_state || this->rt_waiter) {
1439 ret = -EINVAL;
1440 break;
1441 }
1442
1443 /* Check if one of the bits is set in both bitsets */
1444 if (!(this->bitset & bitset))
1445 continue;
1446
1447 mark_wake_futex(&wake_q, this);
1448 if (++ret >= nr_wake)
1449 break;
1450 }
1451 }
1452
1453 spin_unlock(&hb->lock);
1454 wake_up_q(&wake_q);
1455 out_put_key:
1456 put_futex_key(&key);
1457 out:
1458 return ret;
1459 }
1460
1461 /*
1462 * Wake up all waiters hashed on the physical page that is mapped
1463 * to this virtual address:
1464 */
1465 static int
1466 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1467 int nr_wake, int nr_wake2, int op)
1468 {
1469 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1470 struct futex_hash_bucket *hb1, *hb2;
1471 struct futex_q *this, *next;
1472 int ret, op_ret;
1473 WAKE_Q(wake_q);
1474
1475 retry:
1476 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1477 if (unlikely(ret != 0))
1478 goto out;
1479 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1480 if (unlikely(ret != 0))
1481 goto out_put_key1;
1482
1483 hb1 = hash_futex(&key1);
1484 hb2 = hash_futex(&key2);
1485
1486 retry_private:
1487 double_lock_hb(hb1, hb2);
1488 op_ret = futex_atomic_op_inuser(op, uaddr2);
1489 if (unlikely(op_ret < 0)) {
1490
1491 double_unlock_hb(hb1, hb2);
1492
1493 #ifndef CONFIG_MMU
1494 /*
1495 * we don't get EFAULT from MMU faults if we don't have an MMU,
1496 * but we might get them from range checking
1497 */
1498 ret = op_ret;
1499 goto out_put_keys;
1500 #endif
1501
1502 if (unlikely(op_ret != -EFAULT)) {
1503 ret = op_ret;
1504 goto out_put_keys;
1505 }
1506
1507 ret = fault_in_user_writeable(uaddr2);
1508 if (ret)
1509 goto out_put_keys;
1510
1511 if (!(flags & FLAGS_SHARED))
1512 goto retry_private;
1513
1514 put_futex_key(&key2);
1515 put_futex_key(&key1);
1516 goto retry;
1517 }
1518
1519 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1520 if (match_futex (&this->key, &key1)) {
1521 if (this->pi_state || this->rt_waiter) {
1522 ret = -EINVAL;
1523 goto out_unlock;
1524 }
1525 mark_wake_futex(&wake_q, this);
1526 if (++ret >= nr_wake)
1527 break;
1528 }
1529 }
1530
1531 if (op_ret > 0) {
1532 op_ret = 0;
1533 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1534 if (match_futex (&this->key, &key2)) {
1535 if (this->pi_state || this->rt_waiter) {
1536 ret = -EINVAL;
1537 goto out_unlock;
1538 }
1539 mark_wake_futex(&wake_q, this);
1540 if (++op_ret >= nr_wake2)
1541 break;
1542 }
1543 }
1544 ret += op_ret;
1545 }
1546
1547 out_unlock:
1548 double_unlock_hb(hb1, hb2);
1549 wake_up_q(&wake_q);
1550 out_put_keys:
1551 put_futex_key(&key2);
1552 out_put_key1:
1553 put_futex_key(&key1);
1554 out:
1555 return ret;
1556 }
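/*
 * The op argument encodes both an atomic operation on *uaddr2 and a
 * comparison against its old value, packed by the FUTEX_OP() helper
 * from the uapi header:
 *
 *	FUTEX_OP(op, oparg, cmp, cmparg)
 *
 * For example, FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) atomically
 * adds 1 to *uaddr2, always wakes up to nr_wake waiters on uaddr1, and
 * additionally wakes up to nr_wake2 waiters on uaddr2 if the old value
 * was greater than 0.
 */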
1557
1558 /**
1559 * requeue_futex() - Requeue a futex_q from one hb to another
1560 * @q: the futex_q to requeue
1561 * @hb1: the source hash_bucket
1562 * @hb2: the target hash_bucket
1563 * @key2: the new key for the requeued futex_q
1564 */
1565 static inline
1566 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1567 struct futex_hash_bucket *hb2, union futex_key *key2)
1568 {
1569
1570 /*
1571 * If key1 and key2 hash to the same bucket, no need to
1572 * requeue.
1573 */
1574 if (likely(&hb1->chain != &hb2->chain)) {
1575 plist_del(&q->list, &hb1->chain);
1576 hb_waiters_dec(hb1);
1577 hb_waiters_inc(hb2);
1578 plist_add(&q->list, &hb2->chain);
1579 q->lock_ptr = &hb2->lock;
1580 }
1581 get_futex_key_refs(key2);
1582 q->key = *key2;
1583 }
1584
1585 /**
1586 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1587 * @q: the futex_q
1588 * @key: the key of the requeue target futex
1589 * @hb: the hash_bucket of the requeue target futex
1590 *
1591 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1592 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1593 * to the requeue target futex so the waiter can detect the wakeup on the right
1594 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1595 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1596 * to protect access to the pi_state to fixup the owner later. Must be called
1597 * with both q->lock_ptr and hb->lock held.
1598 */
1599 static inline
1600 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1601 struct futex_hash_bucket *hb)
1602 {
1603 get_futex_key_refs(key);
1604 q->key = *key;
1605
1606 __unqueue_futex(q);
1607
1608 WARN_ON(!q->rt_waiter);
1609 q->rt_waiter = NULL;
1610
1611 q->lock_ptr = &hb->lock;
1612
1613 wake_up_state(q->task, TASK_NORMAL);
1614 }
1615
1616 /**
1617 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1618 * @pifutex: the user address of the to futex
1619 * @hb1: the from futex hash bucket, must be locked by the caller
1620 * @hb2: the to futex hash bucket, must be locked by the caller
1621 * @key1: the from futex key
1622 * @key2: the to futex key
1623 * @ps: address to store the pi_state pointer
1624 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1625 *
1626 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1627 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1628 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1629 * hb1 and hb2 must be held by the caller.
1630 *
1631 * Return:
1632 * 0 - failed to acquire the lock atomically;
1633 * >0 - acquired the lock, return value is vpid of the top_waiter
1634 * <0 - error
1635 */
1636 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1637 struct futex_hash_bucket *hb1,
1638 struct futex_hash_bucket *hb2,
1639 union futex_key *key1, union futex_key *key2,
1640 struct futex_pi_state **ps, int set_waiters)
1641 {
1642 struct futex_q *top_waiter = NULL;
1643 u32 curval;
1644 int ret, vpid;
1645
1646 if (get_futex_value_locked(&curval, pifutex))
1647 return -EFAULT;
1648
1649 if (unlikely(should_fail_futex(true)))
1650 return -EFAULT;
1651
1652 /*
1653 * Find the top_waiter and determine if there are additional waiters.
1654 * If the caller intends to requeue more than 1 waiter to pifutex,
1655 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1656 * as we have means to handle the possible fault. If not, don't set
 1657 * the bit unnecessarily as it will force the subsequent unlock to enter
1658 * the kernel.
1659 */
1660 top_waiter = futex_top_waiter(hb1, key1);
1661
1662 /* There are no waiters, nothing for us to do. */
1663 if (!top_waiter)
1664 return 0;
1665
1666 /* Ensure we requeue to the expected futex. */
1667 if (!match_futex(top_waiter->requeue_pi_key, key2))
1668 return -EINVAL;
1669
1670 /*
1671 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1672 * the contended case or if set_waiters is 1. The pi_state is returned
1673 * in ps in contended cases.
1674 */
1675 vpid = task_pid_vnr(top_waiter->task);
1676 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1677 set_waiters);
1678 if (ret == 1) {
1679 requeue_pi_wake_futex(top_waiter, key2, hb2);
1680 return vpid;
1681 }
1682 return ret;
1683 }
1684
1685 /**
1686 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1687 * @uaddr1: source futex user address
1688 * @flags: futex flags (FLAGS_SHARED, etc.)
1689 * @uaddr2: target futex user address
1690 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1691 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1692 * @cmpval: @uaddr1 expected value (or %NULL)
1693 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1694 * pi futex (pi to pi requeue is not supported)
1695 *
1696 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1697 * uaddr2 atomically on behalf of the top waiter.
1698 *
1699 * Return:
1700 * >=0 - on success, the number of tasks requeued or woken;
1701 * <0 - on error
1702 */
1703 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1704 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1705 u32 *cmpval, int requeue_pi)
1706 {
1707 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1708 int drop_count = 0, task_count = 0, ret;
1709 struct futex_pi_state *pi_state = NULL;
1710 struct futex_hash_bucket *hb1, *hb2;
1711 struct futex_q *this, *next;
1712 WAKE_Q(wake_q);
1713
1714 if (nr_wake < 0 || nr_requeue < 0)
1715 return -EINVAL;
1716
1717 if (requeue_pi) {
1718 /*
1719 * Requeue PI only works on two distinct uaddrs. This
1720 * check is only valid for private futexes. See below.
1721 */
1722 if (uaddr1 == uaddr2)
1723 return -EINVAL;
1724
1725 /*
1726 * requeue_pi requires a pi_state, try to allocate it now
1727 * without any locks in case it fails.
1728 */
1729 if (refill_pi_state_cache())
1730 return -ENOMEM;
1731 /*
1732 * requeue_pi must wake as many tasks as it can, up to nr_wake
1733 * + nr_requeue, since it acquires the rt_mutex prior to
1734 * returning to userspace, so as to not leave the rt_mutex with
1735 * waiters and no owner. However, second and third wake-ups
1736 * cannot be predicted as they involve race conditions with the
1737 * first wake and a fault while looking up the pi_state. Both
1738 * pthread_cond_signal() and pthread_cond_broadcast() should
1739 * use nr_wake=1.
1740 */
1741 if (nr_wake != 1)
1742 return -EINVAL;
1743 }
1744
1745 retry:
1746 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1747 if (unlikely(ret != 0))
1748 goto out;
1749 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1750 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1751 if (unlikely(ret != 0))
1752 goto out_put_key1;
1753
1754 /*
1755 * The check above which compares uaddrs is not sufficient for
1756 * shared futexes. We need to compare the keys:
1757 */
1758 if (requeue_pi && match_futex(&key1, &key2)) {
1759 ret = -EINVAL;
1760 goto out_put_keys;
1761 }
1762
1763 hb1 = hash_futex(&key1);
1764 hb2 = hash_futex(&key2);
1765
1766 retry_private:
1767 hb_waiters_inc(hb2);
1768 double_lock_hb(hb1, hb2);
1769
1770 if (likely(cmpval != NULL)) {
1771 u32 curval;
1772
1773 ret = get_futex_value_locked(&curval, uaddr1);
1774
1775 if (unlikely(ret)) {
1776 double_unlock_hb(hb1, hb2);
1777 hb_waiters_dec(hb2);
1778
1779 ret = get_user(curval, uaddr1);
1780 if (ret)
1781 goto out_put_keys;
1782
1783 if (!(flags & FLAGS_SHARED))
1784 goto retry_private;
1785
1786 put_futex_key(&key2);
1787 put_futex_key(&key1);
1788 goto retry;
1789 }
1790 if (curval != *cmpval) {
1791 ret = -EAGAIN;
1792 goto out_unlock;
1793 }
1794 }
1795
1796 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1797 /*
1798 * Attempt to acquire uaddr2 and wake the top waiter. If we
1799 * intend to requeue waiters, force setting the FUTEX_WAITERS
1800 * bit. We force this here where we are able to easily handle
 1801 * faults rather than in the requeue loop below.
1802 */
1803 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1804 &key2, &pi_state, nr_requeue);
1805
1806 /*
1807 * At this point the top_waiter has either taken uaddr2 or is
1808 * waiting on it. If the former, then the pi_state will not
1809 * exist yet, look it up one more time to ensure we have a
1810 * reference to it. If the lock was taken, ret contains the
1811 * vpid of the top waiter task.
1812 * If the lock was not taken, we have pi_state and an initial
1813 * refcount on it. In case of an error we have nothing.
1814 */
1815 if (ret > 0) {
1816 WARN_ON(pi_state);
1817 drop_count++;
1818 task_count++;
1819 /*
1820 * If we acquired the lock, then the user space value
1821 * of uaddr2 should be vpid. It cannot be changed by
1822 * the top waiter as it is blocked on hb2 lock if it
1823 * tries to do so. If something fiddled with it behind
1824 * our back the pi state lookup might unearth it. So
1825 * we rather use the known value than rereading and
1826 * handing potential crap to lookup_pi_state.
1827 *
1828 * If that call succeeds then we have pi_state and an
1829 * initial refcount on it.
1830 */
1831 ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1832 }
1833
1834 switch (ret) {
1835 case 0:
1836 /* We hold a reference on the pi state. */
1837 break;
1838
1839 /* If the above failed, then pi_state is NULL */
1840 case -EFAULT:
1841 double_unlock_hb(hb1, hb2);
1842 hb_waiters_dec(hb2);
1843 put_futex_key(&key2);
1844 put_futex_key(&key1);
1845 ret = fault_in_user_writeable(uaddr2);
1846 if (!ret)
1847 goto retry;
1848 goto out;
1849 case -EAGAIN:
1850 /*
1851 * Two reasons for this:
1852 * - Owner is exiting and we just wait for the
1853 * exit to complete.
1854 * - The user space value changed.
1855 */
1856 double_unlock_hb(hb1, hb2);
1857 hb_waiters_dec(hb2);
1858 put_futex_key(&key2);
1859 put_futex_key(&key1);
1860 cond_resched();
1861 goto retry;
1862 default:
1863 goto out_unlock;
1864 }
1865 }
1866
1867 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1868 if (task_count - nr_wake >= nr_requeue)
1869 break;
1870
1871 if (!match_futex(&this->key, &key1))
1872 continue;
1873
1874 /*
1875 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1876 * be paired with each other and no other futex ops.
1877 *
1878 * We should never be requeueing a futex_q with a pi_state,
1879 * which is awaiting a futex_unlock_pi().
1880 */
1881 if ((requeue_pi && !this->rt_waiter) ||
1882 (!requeue_pi && this->rt_waiter) ||
1883 this->pi_state) {
1884 ret = -EINVAL;
1885 break;
1886 }
1887
1888 /*
1889 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1890 * lock, we already woke the top_waiter. If not, it will be
1891 * woken by futex_unlock_pi().
1892 */
1893 if (++task_count <= nr_wake && !requeue_pi) {
1894 mark_wake_futex(&wake_q, this);
1895 continue;
1896 }
1897
1898 /* Ensure we requeue to the expected futex for requeue_pi. */
1899 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1900 ret = -EINVAL;
1901 break;
1902 }
1903
1904 /*
1905 * Requeue nr_requeue waiters and possibly one more in the case
1906 * of requeue_pi if we couldn't acquire the lock atomically.
1907 */
1908 if (requeue_pi) {
1909 /*
1910 * Prepare the waiter to take the rt_mutex. Take a
1911 * refcount on the pi_state and store the pointer in
1912 * the futex_q object of the waiter.
1913 */
1914 atomic_inc(&pi_state->refcount);
1915 this->pi_state = pi_state;
1916 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1917 this->rt_waiter,
1918 this->task);
1919 if (ret == 1) {
1920 /*
1921 * We got the lock. We do neither drop the
1922 * refcount on pi_state nor clear
1923 * this->pi_state because the waiter needs the
1924 * pi_state for cleaning up the user space
1925 * value. It will drop the refcount after
1926 * doing so.
1927 */
1928 requeue_pi_wake_futex(this, &key2, hb2);
1929 drop_count++;
1930 continue;
1931 } else if (ret) {
1932 /*
1933 * rt_mutex_start_proxy_lock() detected a
1934 * potential deadlock when we tried to queue
1935 * that waiter. Drop the pi_state reference
1936 * which we took above and remove the pointer
1937 * to the state from the waiters futex_q
1938 * object.
1939 */
1940 this->pi_state = NULL;
1941 put_pi_state(pi_state);
1942 /*
1943 * We stop queueing more waiters and let user
1944 * space deal with the mess.
1945 */
1946 break;
1947 }
1948 }
1949 requeue_futex(this, hb1, hb2, &key2);
1950 drop_count++;
1951 }
1952
1953 /*
1954 * We took an extra initial reference to the pi_state either
1955 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1956 * need to drop it here again.
1957 */
1958 put_pi_state(pi_state);
1959
1960 out_unlock:
1961 double_unlock_hb(hb1, hb2);
1962 wake_up_q(&wake_q);
1963 hb_waiters_dec(hb2);
1964
1965 /*
1966 * drop_futex_key_refs() must be called outside the spinlocks. During
1967 * the requeue we moved futex_q's from the hash bucket at key1 to the
1968 * one at key2 and updated their key pointer. We no longer need to
1969 * hold the references to key1.
1970 */
1971 while (--drop_count >= 0)
1972 drop_futex_key_refs(&key1);
1973
1974 out_put_keys:
1975 put_futex_key(&key2);
1976 out_put_key1:
1977 put_futex_key(&key1);
1978 out:
1979 return ret ? ret : task_count;
1980 }
1981
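/*
 * Illustrative user space counterpart (a sketch, not part of this
 * file): a condvar-style broadcast can use FUTEX_CMP_REQUEUE to wake
 * one waiter and requeue the rest onto the mutex futex, avoiding a
 * thundering herd. The cond_seq/mutex_futex layout is hypothetical.
 *
 *	#include <limits.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/futex.h>
 *
 *	// Wake 1 waiter and requeue up to INT_MAX more onto
 *	// mutex_futex, but only if *cond_seq still equals seq.
 *	// Note: nr_requeue travels in the timeout argument slot.
 *	long r = syscall(SYS_futex, cond_seq, FUTEX_CMP_REQUEUE_PRIVATE,
 *			 1, (void *)(long)INT_MAX, mutex_futex, seq);
 *	// r < 0 with errno == EAGAIN: *cond_seq changed under us;
 *	// the caller would re-read the sequence and retry.
 */
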
1982 /* The key must be already stored in q->key. */
1983 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1984 __acquires(&hb->lock)
1985 {
1986 struct futex_hash_bucket *hb;
1987
1988 hb = hash_futex(&q->key);
1989
1990 /*
1991 * Increment the counter before taking the lock so that
1992 * a potential waker won't miss a to-be-slept task that is
1993 * waiting for the spinlock. This is safe as all queue_lock()
1994 * users end up calling queue_me(). Similarly, for housekeeping,
1995 * decrement the counter at queue_unlock() when some error has
1996 * occurred and we don't end up adding the task to the list.
1997 */
1998 hb_waiters_inc(hb);
1999
2000 q->lock_ptr = &hb->lock;
2001
2002 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
2003 return hb;
2004 }
2005
2006 static inline void
2007 queue_unlock(struct futex_hash_bucket *hb)
2008 __releases(&hb->lock)
2009 {
2010 spin_unlock(&hb->lock);
2011 hb_waiters_dec(hb);
2012 }
2013
2014 /**
2015 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2016 * @q: The futex_q to enqueue
2017 * @hb: The destination hash bucket
2018 *
2019 * The hb->lock must be held by the caller, and is released here. A call to
2020 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2021 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2022 * or nothing if the unqueue is done as part of the wake process and the unqueue
2023 * state is implicit in the state of the woken task (see
2024 * futex_wait_requeue_pi() for an example).
2025 */
2026 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2027 __releases(&hb->lock)
2028 {
2029 int prio;
2030
2031 /*
2032 * The priority used to register this element is
2033 * - either the real thread-priority for the real-time threads
2034 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2035 * - or MAX_RT_PRIO for non-RT threads.
2036 * Thus, all RT-threads are woken first in priority order, and
2037 * the others are woken last, in FIFO order.
2038 */
2039 prio = min(current->normal_prio, MAX_RT_PRIO);
2040
2041 plist_node_init(&q->list, prio);
2042 plist_add(&q->list, &hb->chain);
2043 q->task = current;
2044 spin_unlock(&hb->lock);
2045 }
2046
2047 /**
2048 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2049 * @q: The futex_q to unqueue
2050 *
2051 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2052 * be paired with exactly one earlier call to queue_me().
2053 *
2054 * Return:
2055 * 1 - if the futex_q was still queued (and we removed it);
2056 * 0 - if the futex_q was already removed by the waking thread
2057 */
2058 static int unqueue_me(struct futex_q *q)
2059 {
2060 spinlock_t *lock_ptr;
2061 int ret = 0;
2062
2063 /* In the common case we don't take the spinlock, which is nice. */
2064 retry:
2065 /*
2066 * q->lock_ptr can change between this read and the following spin_lock.
2067 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2068 * optimizing lock_ptr out of the logic below.
2069 */
2070 lock_ptr = READ_ONCE(q->lock_ptr);
2071 if (lock_ptr != NULL) {
2072 spin_lock(lock_ptr);
2073 /*
2074 * q->lock_ptr can change between reading it and
2075 * spin_lock(), causing us to take the wrong lock. This
2076 * corrects the race condition.
2077 *
2078 * Reasoning goes like this: if we have the wrong lock,
2079 * q->lock_ptr must have changed (maybe several times)
2080 * between reading it and the spin_lock(). It can
2081 * change again after the spin_lock() but only if it was
2082 * already changed before the spin_lock(). It cannot,
2083 * however, change back to the original value. Therefore
2084 * we can detect whether we acquired the correct lock.
2085 */
2086 if (unlikely(lock_ptr != q->lock_ptr)) {
2087 spin_unlock(lock_ptr);
2088 goto retry;
2089 }
2090 __unqueue_futex(q);
2091
2092 BUG_ON(q->pi_state);
2093
2094 spin_unlock(lock_ptr);
2095 ret = 1;
2096 }
2097
2098 drop_futex_key_refs(&q->key);
2099 return ret;
2100 }
2101
2102 /*
2103 * PI futexes cannot be requeued and must remove themselves from the
2104 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2105 * and dropped here.
2106 */
2107 static void unqueue_me_pi(struct futex_q *q)
2108 __releases(q->lock_ptr)
2109 {
2110 __unqueue_futex(q);
2111
2112 BUG_ON(!q->pi_state);
2113 put_pi_state(q->pi_state);
2114 q->pi_state = NULL;
2115
2116 spin_unlock(q->lock_ptr);
2117 }
2118
2119 /*
2120 * Fixup the pi_state owner with the new owner.
2121 *
2122 * Must be called with the hash bucket lock held and mm->sem held
2123 * for non-private futexes.
2124 */
2125 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2126 struct task_struct *newowner)
2127 {
2128 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2129 struct futex_pi_state *pi_state = q->pi_state;
2130 struct task_struct *oldowner = pi_state->owner;
2131 u32 uval, uninitialized_var(curval), newval;
2132 int ret;
2133
2134 /* Owner died? */
2135 if (!pi_state->owner)
2136 newtid |= FUTEX_OWNER_DIED;
2137
2138 /*
2139 * We are here either because we stole the rtmutex from the
2140 * previous highest priority waiter or we are the highest priority
2141 * waiter but failed to get the rtmutex the first time.
2142 * We have to replace the newowner TID in the user space variable.
2143 * This must be atomic as we have to preserve the owner died bit here.
2144 *
2145 * Note: We write the user space value _before_ changing the pi_state
2146 * because we can fault here. Imagine swapped out pages or a fork
2147 * that marked all the anonymous memory readonly for cow.
2148 *
2149 * Modifying pi_state _before_ the user space value would
2150 * leave the pi_state in an inconsistent state when we fault
2151 * here, because we need to drop the hash bucket lock to
2152 * handle the fault. This might be observed in the PID check
2153 * in lookup_pi_state.
2154 */
2155 retry:
2156 if (get_futex_value_locked(&uval, uaddr))
2157 goto handle_fault;
2158
2159 while (1) {
2160 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2161
2162 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2163 goto handle_fault;
2164 if (curval == uval)
2165 break;
2166 uval = curval;
2167 }
2168
2169 /*
2170 * We fixed up user space. Now we need to fix the pi_state
2171 * itself.
2172 */
2173 if (pi_state->owner != NULL) {
2174 raw_spin_lock_irq(&pi_state->owner->pi_lock);
2175 WARN_ON(list_empty(&pi_state->list));
2176 list_del_init(&pi_state->list);
2177 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2178 }
2179
2180 pi_state->owner = newowner;
2181
2182 raw_spin_lock_irq(&newowner->pi_lock);
2183 WARN_ON(!list_empty(&pi_state->list));
2184 list_add(&pi_state->list, &newowner->pi_state_list);
2185 raw_spin_unlock_irq(&newowner->pi_lock);
2186 return 0;
2187
2188 /*
2189 * To handle the page fault we need to drop the hash bucket
2190 * lock here. That gives the other task (either the highest priority
2191 * waiter itself or the task which stole the rtmutex) the
2192 * chance to try the fixup of the pi_state. So once we are
2193 * back from handling the fault we need to check the pi_state
2194 * after reacquiring the hash bucket lock and before trying to
2195 * do another fixup. When the fixup has been done already we
2196 * simply return.
2197 */
2198 handle_fault:
2199 spin_unlock(q->lock_ptr);
2200
2201 ret = fault_in_user_writeable(uaddr);
2202
2203 spin_lock(q->lock_ptr);
2204
2205 /*
2206 * Check if someone else fixed it for us:
2207 */
2208 if (pi_state->owner != oldowner)
2209 return 0;
2210
2211 if (ret)
2212 return ret;
2213
2214 goto retry;
2215 }
2216
2217 static long futex_wait_restart(struct restart_block *restart);
2218
2219 /**
2220 * fixup_owner() - Post lock pi_state and corner case management
2221 * @uaddr: user address of the futex
2222 * @q: futex_q (contains pi_state and access to the rt_mutex)
2223 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2224 *
2225 * After attempting to lock an rt_mutex, this function is called to cleanup
2226 * the pi_state owner as well as handle race conditions that may allow us to
2227 * acquire the lock. Must be called with the hb lock held.
2228 *
2229 * Return:
2230 * 1 - success, lock taken;
2231 * 0 - success, lock not taken;
2232 * <0 - on error (-EFAULT)
2233 */
2234 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2235 {
2236 struct task_struct *owner;
2237 int ret = 0;
2238
2239 if (locked) {
2240 /*
2241 * Got the lock. We might not be the anticipated owner if we
2242 * did a lock-steal - fix up the PI-state in that case:
2243 */
2244 if (q->pi_state->owner != current)
2245 ret = fixup_pi_state_owner(uaddr, q, current);
2246 goto out;
2247 }
2248
2249 /*
2250 * Catch the rare case where the lock was released when we were on the
2251 * way back before we locked the hash bucket.
2252 */
2253 if (q->pi_state->owner == current) {
2254 /*
2255 * Try to get the rt_mutex now. This might fail as some other
2256 * task acquired the rt_mutex after we removed ourselves from the
2257 * rt_mutex waiters list.
2258 */
2259 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2260 locked = 1;
2261 goto out;
2262 }
2263
2264 /*
2265 * pi_state is incorrect, some other task did a lock steal and
2266 * we returned due to timeout or signal without taking the
2267 * rt_mutex. Too late.
2268 */
2269 raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
2270 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2271 if (!owner)
2272 owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2273 raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
2274 ret = fixup_pi_state_owner(uaddr, q, owner);
2275 goto out;
2276 }
2277
2278 /*
2279 * Paranoia check. If we did not take the lock, then we should not be
2280 * the owner of the rt_mutex.
2281 */
2282 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2283 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2284 "pi-state %p\n", ret,
2285 q->pi_state->pi_mutex.owner,
2286 q->pi_state->owner);
2287
2288 out:
2289 return ret ? ret : locked;
2290 }
2291
2292 /**
2293 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2294 * @hb: the futex hash bucket, must be locked by the caller
2295 * @q: the futex_q to queue up on
2296 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2297 */
2298 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2299 struct hrtimer_sleeper *timeout)
2300 {
2301 /*
2302 * The task state is guaranteed to be set before another task can
2303 * wake it. set_current_state() is implemented using smp_store_mb() and
2304 * queue_me() calls spin_unlock() upon completion, both serializing
2305 * access to the hash list and forcing another memory barrier.
2306 */
2307 set_current_state(TASK_INTERRUPTIBLE);
2308 queue_me(q, hb);
2309
2310 /* Arm the timer */
2311 if (timeout)
2312 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2313
2314 /*
2315 * If we have been removed from the hash list, then another task
2316 * has tried to wake us, and we can skip the call to schedule().
2317 */
2318 if (likely(!plist_node_empty(&q->list))) {
2319 /*
2320 * If the timer has already expired, current will already be
2321 * flagged for rescheduling. Only call schedule if there
2322 * is no timeout, or if it has yet to expire.
2323 */
2324 if (!timeout || timeout->task)
2325 freezable_schedule();
2326 }
2327 __set_current_state(TASK_RUNNING);
2328 }
2329
2330 /**
2331 * futex_wait_setup() - Prepare to wait on a futex
2332 * @uaddr: the futex userspace address
2333 * @val: the expected value
2334 * @flags: futex flags (FLAGS_SHARED, etc.)
2335 * @q: the associated futex_q
2336 * @hb: storage for hash_bucket pointer to be returned to caller
2337 *
2338 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2339 * compare it with the expected value. Handle atomic faults internally.
2340 * Return with the hb lock held and a q.key reference on success, and unlocked
2341 * with no q.key reference on failure.
2342 *
2343 * Return:
2344 * 0 - uaddr contains val and hb has been locked;
2345 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2346 */
2347 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2348 struct futex_q *q, struct futex_hash_bucket **hb)
2349 {
2350 u32 uval;
2351 int ret;
2352
2353 /*
2354 * Access the page AFTER the hash-bucket is locked.
2355 * Order is important:
2356 *
2357 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2358 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2359 *
2360 * The basic logical guarantee of a futex is that it blocks ONLY
2361 * if cond(var) is known to be true at the time of blocking, for
2362 * any cond. If we locked the hash-bucket after testing *uaddr, that
2363 * would open a race condition where we could block indefinitely with
2364 * cond(var) false, which would violate the guarantee.
2365 *
2366 * On the other hand, we insert q and release the hash-bucket only
2367 * after testing *uaddr. This guarantees that futex_wait() will NOT
2368 * absorb a wakeup if *uaddr does not match the desired values
2369 * while the syscall executes.
2370 */
2371 retry:
2372 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2373 if (unlikely(ret != 0))
2374 return ret;
2375
2376 retry_private:
2377 *hb = queue_lock(q);
2378
2379 ret = get_futex_value_locked(&uval, uaddr);
2380
2381 if (ret) {
2382 queue_unlock(*hb);
2383
2384 ret = get_user(uval, uaddr);
2385 if (ret)
2386 goto out;
2387
2388 if (!(flags & FLAGS_SHARED))
2389 goto retry_private;
2390
2391 put_futex_key(&q->key);
2392 goto retry;
2393 }
2394
2395 if (uval != val) {
2396 queue_unlock(*hb);
2397 ret = -EWOULDBLOCK;
2398 }
2399
2400 out:
2401 if (ret)
2402 put_futex_key(&q->key);
2403 return ret;
2404 }
2405
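/*
 * User space side of the ordering above (illustrative, not part of
 * this file): the waiter passes the value it observed, so the kernel
 * refuses to block once a waker has already changed the word.
 * should_block() is a hypothetical predicate.
 *
 *	for (;;) {
 *		unsigned int val = __atomic_load_n(&futex_word,
 *						   __ATOMIC_ACQUIRE);
 *		if (!should_block(val))
 *			break;
 *		// Sleeps only while futex_word == val in the kernel:
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *			val, NULL, NULL, 0);
 *		// EWOULDBLOCK or a spurious wakeup: just re-evaluate.
 *	}
 */
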
2406 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2407 ktime_t *abs_time, u32 bitset)
2408 {
2409 struct hrtimer_sleeper timeout, *to = NULL;
2410 struct restart_block *restart;
2411 struct futex_hash_bucket *hb;
2412 struct futex_q q = futex_q_init;
2413 int ret;
2414
2415 if (!bitset)
2416 return -EINVAL;
2417 q.bitset = bitset;
2418
2419 if (abs_time) {
2420 to = &timeout;
2421
2422 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2423 CLOCK_REALTIME : CLOCK_MONOTONIC,
2424 HRTIMER_MODE_ABS);
2425 hrtimer_init_sleeper(to, current);
2426 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2427 current->timer_slack_ns);
2428 }
2429
2430 retry:
2431 /*
2432 * Prepare to wait on uaddr. On success, holds hb lock and increments
2433 * q.key refs.
2434 */
2435 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2436 if (ret)
2437 goto out;
2438
2439 /* queue_me and wait for wakeup, timeout, or a signal. */
2440 futex_wait_queue_me(hb, &q, to);
2441
2442 /* If we were woken (and unqueued), we succeeded, whatever. */
2443 ret = 0;
2444 /* unqueue_me() drops q.key ref */
2445 if (!unqueue_me(&q))
2446 goto out;
2447 ret = -ETIMEDOUT;
2448 if (to && !to->task)
2449 goto out;
2450
2451 /*
2452 * We expect signal_pending(current), but we might be the
2453 * victim of a spurious wakeup as well.
2454 */
2455 if (!signal_pending(current))
2456 goto retry;
2457
2458 ret = -ERESTARTSYS;
2459 if (!abs_time)
2460 goto out;
2461
2462 restart = &current->restart_block;
2463 restart->fn = futex_wait_restart;
2464 restart->futex.uaddr = uaddr;
2465 restart->futex.val = val;
2466 restart->futex.time = abs_time->tv64;
2467 restart->futex.bitset = bitset;
2468 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2469
2470 ret = -ERESTART_RESTARTBLOCK;
2471
2472 out:
2473 if (to) {
2474 hrtimer_cancel(&to->timer);
2475 destroy_hrtimer_on_stack(&to->timer);
2476 }
2477 return ret;
2478 }
2479
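/*
 * Timeout note (illustrative, not part of this file): FUTEX_WAIT
 * takes a relative timeout which sys_futex() converts to an absolute
 * one before we get here, while FUTEX_WAIT_BITSET passes an absolute
 * timeout straight through. A timed wait might look like:
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };  // relative
 *	long r = syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *			 expected, &ts, NULL, 0);
 *	// r < 0 with errno == ETIMEDOUT once the second has elapsed.
 */
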
2480
2481 static long futex_wait_restart(struct restart_block *restart)
2482 {
2483 u32 __user *uaddr = restart->futex.uaddr;
2484 ktime_t t, *tp = NULL;
2485
2486 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2487 t.tv64 = restart->futex.time;
2488 tp = &t;
2489 }
2490 restart->fn = do_no_restart_syscall;
2491
2492 return (long)futex_wait(uaddr, restart->futex.flags,
2493 restart->futex.val, tp, restart->futex.bitset);
2494 }
2495
2496
2497 /*
2498 * Userspace tried a 0 -> TID atomic transition of the futex value
2499 * and failed. The kernel side here does the whole locking operation:
2500 * if there are waiters then it will block as a consequence of relying
2501 * on rt-mutexes; it does PI, etc. (Due to races the kernel might see
2502 * a 0 value of the futex too.)
2503 *
2504 * Also serves as the futex trylock_pi() operation, with matching semantics.
2505 */
2506 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2507 ktime_t *time, int trylock)
2508 {
2509 struct hrtimer_sleeper timeout, *to = NULL;
2510 struct futex_hash_bucket *hb;
2511 struct futex_q q = futex_q_init;
2512 int res, ret;
2513
2514 if (refill_pi_state_cache())
2515 return -ENOMEM;
2516
2517 if (time) {
2518 to = &timeout;
2519 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2520 HRTIMER_MODE_ABS);
2521 hrtimer_init_sleeper(to, current);
2522 hrtimer_set_expires(&to->timer, *time);
2523 }
2524
2525 retry:
2526 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2527 if (unlikely(ret != 0))
2528 goto out;
2529
2530 retry_private:
2531 hb = queue_lock(&q);
2532
2533 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2534 if (unlikely(ret)) {
2535 /*
2536 * Atomic work succeeded and we got the lock,
2537 * or failed. Either way, we do _not_ block.
2538 */
2539 switch (ret) {
2540 case 1:
2541 /* We got the lock. */
2542 ret = 0;
2543 goto out_unlock_put_key;
2544 case -EFAULT:
2545 goto uaddr_faulted;
2546 case -EAGAIN:
2547 /*
2548 * Two reasons for this:
2549 * - Task is exiting and we just wait for the
2550 * exit to complete.
2551 * - The user space value changed.
2552 */
2553 queue_unlock(hb);
2554 put_futex_key(&q.key);
2555 cond_resched();
2556 goto retry;
2557 default:
2558 goto out_unlock_put_key;
2559 }
2560 }
2561
2562 /*
2563 * Only actually queue now that the atomic ops are done:
2564 */
2565 queue_me(&q, hb);
2566
2567 WARN_ON(!q.pi_state);
2568 /*
2569 * Block on the PI mutex:
2570 */
2571 if (!trylock) {
2572 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2573 } else {
2574 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2575 /* Fixup the trylock return value: */
2576 ret = ret ? 0 : -EWOULDBLOCK;
2577 }
2578
2579 spin_lock(q.lock_ptr);
2580 /*
2581 * Fixup the pi_state owner and possibly acquire the lock if we
2582 * haven't already.
2583 */
2584 res = fixup_owner(uaddr, &q, !ret);
2585 /*
2586 * If fixup_owner() returned an error, propagate that. If it acquired
2587 * the lock, clear our -ETIMEDOUT or -EINTR.
2588 */
2589 if (res)
2590 ret = (res < 0) ? res : 0;
2591
2592 /*
2593 * If fixup_owner() faulted and was unable to handle the fault, unlock
2594 * it and return the fault to userspace.
2595 */
2596 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2597 rt_mutex_unlock(&q.pi_state->pi_mutex);
2598
2599 /* Unqueue and drop the lock */
2600 unqueue_me_pi(&q);
2601
2602 goto out_put_key;
2603
2604 out_unlock_put_key:
2605 queue_unlock(hb);
2606
2607 out_put_key:
2608 put_futex_key(&q.key);
2609 out:
2610 if (to)
2611 destroy_hrtimer_on_stack(&to->timer);
2612 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2613
2614 uaddr_faulted:
2615 queue_unlock(hb);
2616
2617 ret = fault_in_user_writeable(uaddr);
2618 if (ret)
2619 goto out_put_key;
2620
2621 if (!(flags & FLAGS_SHARED))
2622 goto retry_private;
2623
2624 put_futex_key(&q.key);
2625 goto retry;
2626 }
2627
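/*
 * Illustrative user space fast path (a sketch, not part of this
 * file): only the contended case enters the kernel above. futex_word
 * is a hypothetical pointer to the lock word; the 0 -> TID
 * transition is the protocol described in the comment above.
 *
 *	unsigned int expected = 0;
 *	unsigned int tid = syscall(SYS_gettid);
 *	if (!__atomic_compare_exchange_n(futex_word, &expected, tid, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		// Someone owns it: let the kernel queue us with PI.
 *		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI,
 *			0, NULL, NULL, 0);
 */
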
2628 /*
2629 * Userspace attempted a TID -> 0 atomic transition, and failed.
2630 * This is the in-kernel slowpath: we look up the PI state (if any),
2631 * and do the rt-mutex unlock.
2632 */
2633 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2634 {
2635 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2636 union futex_key key = FUTEX_KEY_INIT;
2637 struct futex_hash_bucket *hb;
2638 struct futex_q *match;
2639 int ret;
2640
2641 retry:
2642 if (get_user(uval, uaddr))
2643 return -EFAULT;
2644 /*
2645 * We release only a lock we actually own:
2646 */
2647 if ((uval & FUTEX_TID_MASK) != vpid)
2648 return -EPERM;
2649
2650 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2651 if (ret)
2652 return ret;
2653
2654 hb = hash_futex(&key);
2655 spin_lock(&hb->lock);
2656
2657 /*
2658 * Check waiters first. We do not trust user space values at
2659 * all and we at least want to know if user space fiddled
2660 * with the futex value instead of blindly unlocking.
2661 */
2662 match = futex_top_waiter(hb, &key);
2663 if (match) {
2664 ret = wake_futex_pi(uaddr, uval, match, hb);
2665 /*
2666 * In case of success wake_futex_pi dropped the hash
2667 * bucket lock.
2668 */
2669 if (!ret)
2670 goto out_putkey;
2671 /*
2672 * The atomic access to the futex value generated a
2673 * pagefault, so retry the user-access and the wakeup:
2674 */
2675 if (ret == -EFAULT)
2676 goto pi_faulted;
2677 /*
2678 * An unconditional UNLOCK_PI op raced against a waiter
2679 * setting the FUTEX_WAITERS bit. Try again.
2680 */
2681 if (ret == -EAGAIN) {
2682 spin_unlock(&hb->lock);
2683 put_futex_key(&key);
2684 goto retry;
2685 }
2686 /*
2687 * wake_futex_pi has detected invalid state. Tell user
2688 * space.
2689 */
2690 goto out_unlock;
2691 }
2692
2693 /*
2694 * We have no kernel internal state, i.e. no waiters in the
2695 * kernel. Waiters which are about to queue themselves are stuck
2696 * on hb->lock. So we can safely ignore them. We preserve
2697 * neither the WAITERS bit nor the OWNER_DIED one. We are the
2698 * owner.
2699 */
2700 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2701 goto pi_faulted;
2702
2703 /*
2704 * If uval has changed, let user space handle it.
2705 */
2706 ret = (curval == uval) ? 0 : -EAGAIN;
2707
2708 out_unlock:
2709 spin_unlock(&hb->lock);
2710 out_putkey:
2711 put_futex_key(&key);
2712 return ret;
2713
2714 pi_faulted:
2715 spin_unlock(&hb->lock);
2716 put_futex_key(&key);
2717
2718 ret = fault_in_user_writeable(uaddr);
2719 if (!ret)
2720 goto retry;
2721
2722 return ret;
2723 }
2724
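/*
 * Illustrative unlock counterpart (a sketch, not part of this file):
 * the owner enters the kernel only when the TID -> 0 transition
 * fails, i.e. when FUTEX_WAITERS or FUTEX_OWNER_DIED is set.
 *
 *	unsigned int expected = syscall(SYS_gettid);
 *	if (!__atomic_compare_exchange_n(futex_word, &expected, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI,
 *			0, NULL, NULL, 0);
 */
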
2725 /**
2726 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2727 * @hb: the hash_bucket futex_q was originally enqueued on
2728 * @q: the futex_q woken while waiting to be requeued
2729 * @key2: the futex_key of the requeue target futex
2730 * @timeout: the timeout associated with the wait (NULL if none)
2731 *
2732 * Detect if the task was woken on the initial futex as opposed to the requeue
2733 * target futex. If so, determine if it was a timeout or a signal that caused
2734 * the wakeup and return the appropriate error code to the caller. Must be
2735 * called with the hb lock held.
2736 *
2737 * Return:
2738 * 0 - no early wakeup detected;
2739 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2740 */
2741 static inline
2742 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2743 struct futex_q *q, union futex_key *key2,
2744 struct hrtimer_sleeper *timeout)
2745 {
2746 int ret = 0;
2747
2748 /*
2749 * With the hb lock held, we avoid races while we process the wakeup.
2750 * We only need to hold hb (and not hb2) to ensure atomicity as the
2751 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2752 * It can't be requeued from uaddr2 to something else since we don't
2753 * support a PI aware source futex for requeue.
2754 */
2755 if (!match_futex(&q->key, key2)) {
2756 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2757 /*
2758 * We were woken prior to requeue by a timeout or a signal.
2759 * Unqueue the futex_q and determine which it was.
2760 */
2761 plist_del(&q->list, &hb->chain);
2762 hb_waiters_dec(hb);
2763
2764 /* Handle spurious wakeups gracefully */
2765 ret = -EWOULDBLOCK;
2766 if (timeout && !timeout->task)
2767 ret = -ETIMEDOUT;
2768 else if (signal_pending(current))
2769 ret = -ERESTARTNOINTR;
2770 }
2771 return ret;
2772 }
2773
2774 /**
2775 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2776 * @uaddr: the futex we initially wait on (non-pi)
2777 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2778 * the same type, no requeueing from private to shared, etc.
2779 * @val: the expected value of uaddr
2780 * @abs_time: absolute timeout
2781 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2782 * @uaddr2: the pi futex we will take prior to returning to user-space
2783 *
2784 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2785 * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
2786 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2787 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2788 * without one, the pi logic would not know which task to boost/deboost, if
2789 * there was a need to.
2790 *
2791 * We call schedule in futex_wait_queue_me() when we enqueue and return
2792 * from there via one of the following:
2793 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2794 * 2) wakeup on uaddr2 after a requeue
2795 * 3) signal
2796 * 4) timeout
2797 *
2798 * If 3, cleanup and return -ERESTARTNOINTR.
2799 *
2800 * If 2, we may then block on trying to take the rt_mutex and return via:
2801 * 5) successful lock
2802 * 6) signal
2803 * 7) timeout
2804 * 8) other lock acquisition failure
2805 *
2806 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2807 *
2808 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2809 *
2810 * Return:
2811 * 0 - On success;
2812 * <0 - On error
2813 */
2814 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2815 u32 val, ktime_t *abs_time, u32 bitset,
2816 u32 __user *uaddr2)
2817 {
2818 struct hrtimer_sleeper timeout, *to = NULL;
2819 struct rt_mutex_waiter rt_waiter;
2820 struct futex_hash_bucket *hb;
2821 union futex_key key2 = FUTEX_KEY_INIT;
2822 struct futex_q q = futex_q_init;
2823 int res, ret;
2824
2825 if (uaddr == uaddr2)
2826 return -EINVAL;
2827
2828 if (!bitset)
2829 return -EINVAL;
2830
2831 if (abs_time) {
2832 to = &timeout;
2833 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2834 CLOCK_REALTIME : CLOCK_MONOTONIC,
2835 HRTIMER_MODE_ABS);
2836 hrtimer_init_sleeper(to, current);
2837 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2838 current->timer_slack_ns);
2839 }
2840
2841 /*
2842 * The waiter is allocated on our stack, manipulated by the requeue
2843 * code while we sleep on uaddr.
2844 */
2845 debug_rt_mutex_init_waiter(&rt_waiter);
2846 RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2847 RB_CLEAR_NODE(&rt_waiter.tree_entry);
2848 rt_waiter.task = NULL;
2849
2850 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2851 if (unlikely(ret != 0))
2852 goto out;
2853
2854 q.bitset = bitset;
2855 q.rt_waiter = &rt_waiter;
2856 q.requeue_pi_key = &key2;
2857
2858 /*
2859 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2860 * count.
2861 */
2862 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2863 if (ret)
2864 goto out_key2;
2865
2866 /*
2867 * The check above which compares uaddrs is not sufficient for
2868 * shared futexes. We need to compare the keys:
2869 */
2870 if (match_futex(&q.key, &key2)) {
2871 queue_unlock(hb);
2872 ret = -EINVAL;
2873 goto out_put_keys;
2874 }
2875
2876 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2877 futex_wait_queue_me(hb, &q, to);
2878
2879 spin_lock(&hb->lock);
2880 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2881 spin_unlock(&hb->lock);
2882 if (ret)
2883 goto out_put_keys;
2884
2885 /*
2886 * In order for us to be here, we know our q.key == key2, and since
2887 * we took the hb->lock above, we also know that futex_requeue() has
2888 * completed and we no longer have to concern ourselves with a wakeup
2889 * race with the atomic proxy lock acquisition by the requeue code. The
2890 * futex_requeue dropped our key1 reference and incremented our key2
2891 * reference count.
2892 */
2893
2894 /* Check if the requeue code acquired the second futex for us. */
2895 if (!q.rt_waiter) {
2896 /*
2897 * Got the lock. We might not be the anticipated owner if we
2898 * did a lock-steal - fix up the PI-state in that case.
2899 */
2900 if (q.pi_state && (q.pi_state->owner != current)) {
2901 spin_lock(q.lock_ptr);
2902 ret = fixup_pi_state_owner(uaddr2, &q, current);
2903 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
2904 rt_mutex_unlock(&q.pi_state->pi_mutex);
2905 /*
2906 * Drop the reference to the pi state which
2907 * the requeue_pi() code acquired for us.
2908 */
2909 put_pi_state(q.pi_state);
2910 spin_unlock(q.lock_ptr);
2911 }
2912 } else {
2913 struct rt_mutex *pi_mutex;
2914
2915 /*
2916 * We have been woken up by futex_unlock_pi(), a timeout, or a
2917 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2918 * the pi_state.
2919 */
2920 WARN_ON(!q.pi_state);
2921 pi_mutex = &q.pi_state->pi_mutex;
2922 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2923 debug_rt_mutex_free_waiter(&rt_waiter);
2924
2925 spin_lock(q.lock_ptr);
2926 /*
2927 * Fixup the pi_state owner and possibly acquire the lock if we
2928 * haven't already.
2929 */
2930 res = fixup_owner(uaddr2, &q, !ret);
2931 /*
2932 * If fixup_owner() returned an error, propagate that. If it
2933 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2934 */
2935 if (res)
2936 ret = (res < 0) ? res : 0;
2937
2938 /*
2939 * If fixup_pi_state_owner() faulted and was unable to handle
2940 * the fault, unlock the rt_mutex and return the fault to
2941 * userspace.
2942 */
2943 if (ret && rt_mutex_owner(pi_mutex) == current)
2944 rt_mutex_unlock(pi_mutex);
2945
2946 /* Unqueue and drop the lock. */
2947 unqueue_me_pi(&q);
2948 }
2949
2950 if (ret == -EINTR) {
2951 /*
2952 * We've already been requeued, but cannot restart by calling
2953 * futex_lock_pi() directly. We could restart this syscall, but
2954 * it would detect that the user space "val" changed and return
2955 * -EWOULDBLOCK. Save the overhead of the restart and return
2956 * -EWOULDBLOCK directly.
2957 */
2958 ret = -EWOULDBLOCK;
2959 }
2960
2961 out_put_keys:
2962 put_futex_key(&q.key);
2963 out_key2:
2964 put_futex_key(&key2);
2965
2966 out:
2967 if (to) {
2968 hrtimer_cancel(&to->timer);
2969 destroy_hrtimer_on_stack(&to->timer);
2970 }
2971 return ret;
2972 }
2973
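/*
 * Illustrative pairing (a sketch, not part of this file): a
 * pthread_cond_wait() style caller on a PI mutex uses this op on the
 * wait side, and the signaler uses FUTEX_CMP_REQUEUE_PI. The
 * cond_seq word and mutex_futex are hypothetical.
 *
 *	// Waiter, after releasing the PI mutex:
 *	syscall(SYS_futex, cond_seq, FUTEX_WAIT_REQUEUE_PI,
 *		seq, NULL, mutex_futex, 0);
 *	// On a 0 return the caller owns mutex_futex again.
 *
 *	// Signaler (nr_wake must be 1, see futex_requeue() above):
 *	syscall(SYS_futex, cond_seq, FUTEX_CMP_REQUEUE_PI,
 *		1, (void *)(long)INT_MAX, mutex_futex, seq);
 */
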
2974 /*
2975 * Support for robust futexes: the kernel cleans up held futexes at
2976 * thread exit time.
2977 *
2978 * Implementation: user-space maintains a per-thread list of locks it
2979 * is holding. Upon do_exit(), the kernel carefully walks this list,
2980 * and marks all locks that are owned by this thread with the
2981 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2982 * always manipulated with the lock held, so the list is private and
2983 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2984 * field, to allow the kernel to clean up if the thread dies after
2985 * acquiring the lock, but just before it could have added itself to
2986 * the list. There can only be one such pending lock.
2987 */
2988
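/*
 * Illustrative registration (a sketch, not part of this file): a
 * threading library registers one list head per thread once;
 * struct my_mutex and its embedded list entry are hypothetical.
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next = &head.list;	// empty circular list
 *	head.list_op_pending = NULL;
 *	// Offset from a list entry to its futex word:
 *	head.futex_offset = offsetof(struct my_mutex, futex) -
 *			    offsetof(struct my_mutex, list);
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */
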
2989 /**
2990 * sys_set_robust_list() - Set the robust-futex list head of a task
2991 * @head: pointer to the list-head
2992 * @len: length of the list-head, as userspace expects
2993 */
2994 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2995 size_t, len)
2996 {
2997 if (!futex_cmpxchg_enabled)
2998 return -ENOSYS;
2999 /*
3000 * The kernel knows only one size for now:
3001 */
3002 if (unlikely(len != sizeof(*head)))
3003 return -EINVAL;
3004
3005 current->robust_list = head;
3006
3007 return 0;
3008 }
3009
3010 /**
3011 * sys_get_robust_list() - Get the robust-futex list head of a task
3012 * @pid: pid of the process [zero for current task]
3013 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3014 * @len_ptr: pointer to a length field, the kernel fills in the header size
3015 */
3016 SYSCALL_DEFINE3(get_robust_list, int, pid,
3017 struct robust_list_head __user * __user *, head_ptr,
3018 size_t __user *, len_ptr)
3019 {
3020 struct robust_list_head __user *head;
3021 unsigned long ret;
3022 struct task_struct *p;
3023
3024 if (!futex_cmpxchg_enabled)
3025 return -ENOSYS;
3026
3027 rcu_read_lock();
3028
3029 ret = -ESRCH;
3030 if (!pid)
3031 p = current;
3032 else {
3033 p = find_task_by_vpid(pid);
3034 if (!p)
3035 goto err_unlock;
3036 }
3037
3038 ret = -EPERM;
3039 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3040 goto err_unlock;
3041
3042 head = p->robust_list;
3043 rcu_read_unlock();
3044
3045 if (put_user(sizeof(*head), len_ptr))
3046 return -EFAULT;
3047 return put_user(head, head_ptr);
3048
3049 err_unlock:
3050 rcu_read_unlock();
3051
3052 return ret;
3053 }
3054
3055 /*
3056 * Process a futex-list entry, check whether it's owned by the
3057 * dying task, and do notification if so:
3058 */
3059 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3060 {
3061 u32 uval, uninitialized_var(nval), mval;
3062
3063 retry:
3064 if (get_user(uval, uaddr))
3065 return -1;
3066
3067 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3068 /*
3069 * Ok, this dying thread is truly holding a futex
3070 * of interest. Set the OWNER_DIED bit atomically
3071 * via cmpxchg, and if the value had FUTEX_WAITERS
3072 * set, wake up a waiter (if any). (We have to do a
3073 * futex_wake() even if OWNER_DIED is already set -
3074 * to handle the rare but possible case of recursive
3075 * thread-death.) The rest of the cleanup is done in
3076 * userspace.
3077 */
3078 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3079 /*
3080 * We are not holding a lock here, but we want to have
3081 * the pagefault_disable/enable() protection because
3082 * we want to handle the fault gracefully. If the
3083 * access fails we try to fault in the futex with R/W
3084 * verification via get_user_pages. get_user() above
3085 * does not guarantee R/W access. If that fails we
3086 * give up and leave the futex locked.
3087 */
3088 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3089 if (fault_in_user_writeable(uaddr))
3090 return -1;
3091 goto retry;
3092 }
3093 if (nval != uval)
3094 goto retry;
3095
3096 /*
3097 * Wake robust non-PI futexes here. The wakeup of
3098 * PI futexes happens in exit_pi_state():
3099 */
3100 if (!pi && (uval & FUTEX_WAITERS))
3101 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3102 }
3103 return 0;
3104 }
3105
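/*
 * Illustrative user space view (a sketch, not part of this file): a
 * robust lock acquisition that sees the bit set here knows the old
 * owner died holding the lock. repair_state() is hypothetical and
 * plays the role pthread_mutex_consistent() prepares for.
 *
 *	unsigned int val = __atomic_load_n(futex_word, __ATOMIC_ACQUIRE);
 *	if (val & FUTEX_OWNER_DIED) {
 *		repair_state();		// make protected data consistent
 *		// then clear FUTEX_OWNER_DIED while taking ownership
 *	}
 */
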
3106 /*
3107 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3108 */
3109 static inline int fetch_robust_entry(struct robust_list __user **entry,
3110 struct robust_list __user * __user *head,
3111 unsigned int *pi)
3112 {
3113 unsigned long uentry;
3114
3115 if (get_user(uentry, (unsigned long __user *)head))
3116 return -EFAULT;
3117
3118 *entry = (void __user *)(uentry & ~1UL);
3119 *pi = uentry & 1;
3120
3121 return 0;
3122 }
3123
3124 /*
3125 * Walk curr->robust_list (very carefully, it's a userspace list!)
3126 * and mark any locks found there dead, and notify any waiters.
3127 *
3128 * We silently return on any sign of a list-walking problem.
3129 */
3130 void exit_robust_list(struct task_struct *curr)
3131 {
3132 struct robust_list_head __user *head = curr->robust_list;
3133 struct robust_list __user *entry, *next_entry, *pending;
3134 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3135 unsigned int uninitialized_var(next_pi);
3136 unsigned long futex_offset;
3137 int rc;
3138
3139 if (!futex_cmpxchg_enabled)
3140 return;
3141
3142 /*
3143 * Fetch the list head (which was registered earlier, via
3144 * sys_set_robust_list()):
3145 */
3146 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3147 return;
3148 /*
3149 * Fetch the relative futex offset:
3150 */
3151 if (get_user(futex_offset, &head->futex_offset))
3152 return;
3153 /*
3154 * Fetch any possibly pending lock-add first, and handle it
3155 * if it exists:
3156 */
3157 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3158 return;
3159
3160 next_entry = NULL; /* avoid warning with gcc */
3161 while (entry != &head->list) {
3162 /*
3163 * Fetch the next entry in the list before calling
3164 * handle_futex_death:
3165 */
3166 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3167 /*
3168 * A pending lock might already be on the list, so
3169 * don't process it twice:
3170 */
3171 if (entry != pending)
3172 if (handle_futex_death((void __user *)entry + futex_offset,
3173 curr, pi))
3174 return;
3175 if (rc)
3176 return;
3177 entry = next_entry;
3178 pi = next_pi;
3179 /*
3180 * Avoid excessively long or circular lists:
3181 */
3182 if (!--limit)
3183 break;
3184
3185 cond_resched();
3186 }
3187
3188 if (pending)
3189 handle_futex_death((void __user *)pending + futex_offset,
3190 curr, pip);
3191 }
3192
3193 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3194 u32 __user *uaddr2, u32 val2, u32 val3)
3195 {
3196 int cmd = op & FUTEX_CMD_MASK;
3197 unsigned int flags = 0;
3198
3199 if (!(op & FUTEX_PRIVATE_FLAG))
3200 flags |= FLAGS_SHARED;
3201
3202 if (op & FUTEX_CLOCK_REALTIME) {
3203 flags |= FLAGS_CLOCKRT;
3204 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3205 cmd != FUTEX_WAIT_REQUEUE_PI)
3206 return -ENOSYS;
3207 }
3208
3209 switch (cmd) {
3210 case FUTEX_LOCK_PI:
3211 case FUTEX_UNLOCK_PI:
3212 case FUTEX_TRYLOCK_PI:
3213 case FUTEX_WAIT_REQUEUE_PI:
3214 case FUTEX_CMP_REQUEUE_PI:
3215 if (!futex_cmpxchg_enabled)
3216 return -ENOSYS;
3217 }
3218
3219 switch (cmd) {
3220 case FUTEX_WAIT:
3221 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3222 case FUTEX_WAIT_BITSET:
3223 return futex_wait(uaddr, flags, val, timeout, val3);
3224 case FUTEX_WAKE:
3225 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3226 case FUTEX_WAKE_BITSET:
3227 return futex_wake(uaddr, flags, val, val3);
3228 case FUTEX_REQUEUE:
3229 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3230 case FUTEX_CMP_REQUEUE:
3231 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3232 case FUTEX_WAKE_OP:
3233 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3234 case FUTEX_LOCK_PI:
3235 return futex_lock_pi(uaddr, flags, timeout, 0);
3236 case FUTEX_UNLOCK_PI:
3237 return futex_unlock_pi(uaddr, flags);
3238 case FUTEX_TRYLOCK_PI:
3239 return futex_lock_pi(uaddr, flags, NULL, 1);
3240 case FUTEX_WAIT_REQUEUE_PI:
3241 val3 = FUTEX_BITSET_MATCH_ANY;
3242 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3243 uaddr2);
3244 case FUTEX_CMP_REQUEUE_PI:
3245 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3246 }
3247 return -ENOSYS;
3248 }
3249
3250
3251 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3252 struct timespec __user *, utime, u32 __user *, uaddr2,
3253 u32, val3)
3254 {
3255 struct timespec ts;
3256 ktime_t t, *tp = NULL;
3257 u32 val2 = 0;
3258 int cmd = op & FUTEX_CMD_MASK;
3259
3260 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3261 cmd == FUTEX_WAIT_BITSET ||
3262 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3263 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3264 return -EFAULT;
3265 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3266 return -EFAULT;
3267 if (!timespec_valid(&ts))
3268 return -EINVAL;
3269
3270 t = timespec_to_ktime(ts);
3271 if (cmd == FUTEX_WAIT)
3272 t = ktime_add_safe(ktime_get(), t);
3273 tp = &t;
3274 }
3275 /*
3276 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3277 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3278 */
3279 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3280 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3281 val2 = (u32) (unsigned long) utime;
3282
3283 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3284 }
3285
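/*
 * Illustrative raw wrapper (a sketch, not part of this file): glibc
 * exports no futex() function, so callers typically roll their own.
 * Note how, for the requeue and wake-op commands above, the utime
 * slot carries a plain integer (val2) rather than a timespec pointer.
 *
 *	#include <stdint.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long futex(uint32_t *uaddr, int op, uint32_t val,
 *			  const struct timespec *utime,
 *			  uint32_t *uaddr2, uint32_t val3)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, utime,
 *			       uaddr2, val3);
 *	}
 */
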
3286 static void __init futex_detect_cmpxchg(void)
3287 {
3288 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3289 u32 curval;
3290
3291 /*
3292 * This will fail and we want it. Some arch implementations do
3293 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3294 * functionality. We want to know that before we call in any
3295 * of the complex code paths. Also we want to prevent
3296 * registration of robust lists in that case. NULL is
3297 * guaranteed to fault and we get -EFAULT on functional
3298 * implementation, the non-functional ones will return
3299 * -ENOSYS.
3300 */
3301 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3302 futex_cmpxchg_enabled = 1;
3303 #endif
3304 }
3305
3306 static int __init futex_init(void)
3307 {
3308 unsigned int futex_shift;
3309 unsigned long i;
3310
3311 #if CONFIG_BASE_SMALL
3312 futex_hashsize = 16;
3313 #else
3314 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3315 #endif
3316
3317 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3318 futex_hashsize, 0,
3319 futex_hashsize < 256 ? HASH_SMALL : 0,
3320 &futex_shift, NULL,
3321 futex_hashsize, futex_hashsize);
3322 futex_hashsize = 1UL << futex_shift;
3323
3324 futex_detect_cmpxchg();
3325
3326 for (i = 0; i < futex_hashsize; i++) {
3327 atomic_set(&futex_queues[i].waiters, 0);
3328 plist_head_init(&futex_queues[i].chain);
3329 spin_lock_init(&futex_queues[i].lock);
3330 }
3331
3332 return 0;
3333 }
3334 core_initcall(futex_init);
3335