1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/syscalls.h>
38 #include <linux/freezer.h>
39 #include <linux/memblock.h>
40 #include <linux/fault-inject.h>
41 #include <linux/time_namespace.h>
42
43 #include <asm/futex.h>
44
45 #include "../locking/rtmutex_common.h"
46 #include <trace/hooks/futex.h>
47
48 /*
49 * READ this before attempting to hack on futexes!
50 *
51 * Basic futex operation and ordering guarantees
52 * =============================================
53 *
54 * The waiter reads the futex value in user space and calls
55 * futex_wait(). This function computes the hash bucket and acquires
56 * the hash bucket lock. After that it reads the futex user space value
57 * again and verifies that the data has not changed. If it has not changed
58 * it enqueues itself into the hash bucket, releases the hash bucket lock
59 * and schedules.
60 *
61 * The waker side modifies the user space value of the futex and calls
62 * futex_wake(). This function computes the hash bucket and acquires the
63 * hash bucket lock. Then it looks for waiters on that futex in the hash
64 * bucket and wakes them.
65 *
66 * In futex wake up scenarios where no tasks are blocked on a futex, the
67 * waker can avoid taking the hb spinlock and simply return. In order for
68 * this optimization to work, ordering guarantees must exist so that the
69 * waiter being added to the list is observed when the list is concurrently
70 * being checked by the waker, avoiding scenarios like the following:
71 *
72 * CPU 0 CPU 1
73 * val = *futex;
74 * sys_futex(WAIT, futex, val);
75 * futex_wait(futex, val);
76 * uval = *futex;
77 * *futex = newval;
78 * sys_futex(WAKE, futex);
79 * futex_wake(futex);
80 * if (queue_empty())
81 * return;
82 * if (uval == val)
83 * lock(hash_bucket(futex));
84 * queue();
85 * unlock(hash_bucket(futex));
86 * schedule();
87 *
88 * This would cause the waiter on CPU 0 to wait forever because it
89 * missed the transition of the user space value from val to newval
90 * and the waker did not find the waiter in the hash bucket queue.
91 *
92 * The correct serialization ensures that a waiter either observes
93 * the changed user space value before blocking or is woken by a
94 * concurrent waker:
95 *
96 * CPU 0 CPU 1
97 * val = *futex;
98 * sys_futex(WAIT, futex, val);
99 * futex_wait(futex, val);
100 *
101 * waiters++; (a)
102 * smp_mb(); (A) <-- paired with -.
103 * |
104 * lock(hash_bucket(futex)); |
105 * |
106 * uval = *futex; |
107 * | *futex = newval;
108 * | sys_futex(WAKE, futex);
109 * | futex_wake(futex);
110 * |
111 * `--------> smp_mb(); (B)
112 * if (uval == val)
113 * queue();
114 * unlock(hash_bucket(futex));
115 * schedule(); if (waiters)
116 * lock(hash_bucket(futex));
117 * else wake_waiters(futex);
118 * waiters--; (b) unlock(hash_bucket(futex));
119 *
120 * Where (A) orders the waiters increment and the futex value read through
121 * atomic operations (see hb_waiters_inc) and where (B) orders the write
122 * to futex and the waiters read (see hb_waiters_pending()).
123 *
124 * This yields the following case (where X:=waiters, Y:=futex):
125 *
126 * X = Y = 0
127 *
128 * w[X]=1 w[Y]=1
129 * MB MB
130 * r[Y]=y r[X]=x
131 *
132 * Which guarantees that x==0 && y==0 is impossible; which translates back into
133 * the guarantee that we cannot both miss the futex variable change and the
134 * enqueue.
135 *
136 * Note that a new waiter is accounted for in (a) even though the wait call
137 * may later return an error, in which case the increment is undone in (b).
138 * Refer to the comment in queue_lock().
139 *
140 * Similarly, in order to account for waiters being requeued to another
141 * address we always increment the waiters for the destination bucket before
142 * acquiring the lock, and decrement them again after releasing it - the
143 * code that actually moves the futex(es) between hash buckets (requeue_futex)
144 * will do the additional required waiter count housekeeping. This is done
145 * in double_lock_hb() and double_unlock_hb(), respectively.
146 */
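/*
 * Illustrative userspace sketch (not part of this file): a minimal wait/wake
 * pairing that relies on the ordering guarantees described above, using the
 * documented futex(2) syscall interface. The helper names futex_wait() and
 * futex_wake() are made up for the example.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	static uint32_t futex_word;		// shared between threads
 *
 *	static long futex_wait(uint32_t *uaddr, uint32_t val)
 *	{
 *		// Blocks only while *uaddr still equals val; the kernel
 *		// re-reads the value under the hash bucket lock (see above).
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
 *	}
 *
 *	static long futex_wake(uint32_t *uaddr, int nr)
 *	{
 *		// Wakes up to nr tasks queued on uaddr.
 *		return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr, NULL, NULL, 0);
 *	}
 *
 *	// Waiter:				// Waker:
 *	uint32_t val = futex_word;		futex_word = 1;
 *	if (val == 0)				futex_wake(&futex_word, 1);
 *		futex_wait(&futex_word, val);
 */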
147
148 #ifdef CONFIG_HAVE_FUTEX_CMPXCHG
149 #define futex_cmpxchg_enabled 1
150 #else
151 static int __read_mostly futex_cmpxchg_enabled;
152 #endif
153
154 /*
155 * Futex flags used to encode options to functions and preserve them across
156 * restarts.
157 */
158 #ifdef CONFIG_MMU
159 # define FLAGS_SHARED 0x01
160 #else
161 /*
162 * NOMMU does not have per process address space. Let the compiler optimize
163 * code away.
164 */
165 # define FLAGS_SHARED 0x00
166 #endif
167 #define FLAGS_CLOCKRT 0x02
168 #define FLAGS_HAS_TIMEOUT 0x04
169
170 /*
171 * Priority Inheritance state:
172 */
173 struct futex_pi_state {
174 /*
175 * list of 'owned' pi_state instances - these have to be
176 * cleaned up in do_exit() if the task exits prematurely:
177 */
178 struct list_head list;
179
180 /*
181 * The PI object:
182 */
183 struct rt_mutex pi_mutex;
184
185 struct task_struct *owner;
186 refcount_t refcount;
187
188 union futex_key key;
189 } __randomize_layout;
190
191 /**
192 * struct futex_q - The hashed futex queue entry, one per waiting task
193 * @list: priority-sorted list of tasks waiting on this futex
194 * @task: the task waiting on the futex
195 * @lock_ptr: the hash bucket lock
196 * @key: the key the futex is hashed on
197 * @pi_state: optional priority inheritance state
198 * @rt_waiter: rt_waiter storage for use with requeue_pi
199 * @requeue_pi_key: the requeue_pi target futex key
200 * @bitset: bitset for the optional bitmasked wakeup
201 *
202 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
203 * we can wake only the relevant ones (hashed queues may be shared).
204 *
205 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
206 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
207 * The order of wakeup is always to make the first condition true, then
208 * the second.
209 *
210 * PI futexes are typically woken before they are removed from the hash list via
211 * the rt_mutex code. See unqueue_me_pi().
212 */
213 struct futex_q {
214 struct plist_node list;
215
216 struct task_struct *task;
217 spinlock_t *lock_ptr;
218 union futex_key key;
219 struct futex_pi_state *pi_state;
220 struct rt_mutex_waiter *rt_waiter;
221 union futex_key *requeue_pi_key;
222 u32 bitset;
223 } __randomize_layout;
224
225 static const struct futex_q futex_q_init = {
226 /* list gets initialized in queue_me() */
227 .key = FUTEX_KEY_INIT,
228 .bitset = FUTEX_BITSET_MATCH_ANY
229 };
230
231 /*
232 * Hash buckets are shared by all the futex_keys that hash to the same
233 * location. Each key may have multiple futex_q structures, one for each task
234 * waiting on a futex.
235 */
236 struct futex_hash_bucket {
237 atomic_t waiters;
238 spinlock_t lock;
239 struct plist_head chain;
240 } ____cacheline_aligned_in_smp;
241
242 /*
243 * The base of the bucket array and its size are always used together
244 * (after initialization only in hash_futex()), so ensure that they
245 * reside in the same cacheline.
246 */
247 static struct {
248 struct futex_hash_bucket *queues;
249 unsigned long hashsize;
250 } __futex_data __read_mostly __aligned(2*sizeof(long));
251 #define futex_queues (__futex_data.queues)
252 #define futex_hashsize (__futex_data.hashsize)
253
254
255 /*
256 * Fault injections for futexes.
257 */
258 #ifdef CONFIG_FAIL_FUTEX
259
260 static struct {
261 struct fault_attr attr;
262
263 bool ignore_private;
264 } fail_futex = {
265 .attr = FAULT_ATTR_INITIALIZER,
266 .ignore_private = false,
267 };
268
269 static int __init setup_fail_futex(char *str)
270 {
271 return setup_fault_attr(&fail_futex.attr, str);
272 }
273 __setup("fail_futex=", setup_fail_futex);
274
275 static bool should_fail_futex(bool fshared)
276 {
277 if (fail_futex.ignore_private && !fshared)
278 return false;
279
280 return should_fail(&fail_futex.attr, 1);
281 }
282
283 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
284
285 static int __init fail_futex_debugfs(void)
286 {
287 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
288 struct dentry *dir;
289
290 dir = fault_create_debugfs_attr("fail_futex", NULL,
291 &fail_futex.attr);
292 if (IS_ERR(dir))
293 return PTR_ERR(dir);
294
295 debugfs_create_bool("ignore-private", mode, dir,
296 &fail_futex.ignore_private);
297 return 0;
298 }
299
300 late_initcall(fail_futex_debugfs);
301
302 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
303
304 #else
305 static inline bool should_fail_futex(bool fshared)
306 {
307 return false;
308 }
309 #endif /* CONFIG_FAIL_FUTEX */
310
311 #ifdef CONFIG_COMPAT
312 static void compat_exit_robust_list(struct task_struct *curr);
313 #else
314 static inline void compat_exit_robust_list(struct task_struct *curr) { }
315 #endif
316
317 /*
318 * Reflects a new waiter being added to the waitqueue.
319 */
320 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
321 {
322 #ifdef CONFIG_SMP
323 atomic_inc(&hb->waiters);
324 /*
325 * Full barrier (A), see the ordering comment above.
326 */
327 smp_mb__after_atomic();
328 #endif
329 }
330
331 /*
332 * Reflects a waiter being removed from the waitqueue by wakeup
333 * paths.
334 */
335 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
336 {
337 #ifdef CONFIG_SMP
338 atomic_dec(&hb->waiters);
339 #endif
340 }
341
342 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
343 {
344 #ifdef CONFIG_SMP
345 /*
346 * Full barrier (B), see the ordering comment above.
347 */
348 smp_mb();
349 return atomic_read(&hb->waiters);
350 #else
351 return 1;
352 #endif
353 }
354
355 /**
356 * hash_futex - Return the hash bucket in the global hash
357 * @key: Pointer to the futex key for which the hash is calculated
358 *
359 * We hash on the keys returned from get_futex_key (see below) and return the
360 * corresponding hash bucket in the global hash.
361 */
362 static struct futex_hash_bucket *hash_futex(union futex_key *key)
363 {
364 u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
365 key->both.offset);
366
367 return &futex_queues[hash & (futex_hashsize - 1)];
368 }
369
370
371 /**
372 * match_futex - Check whether two futex keys are equal
373 * @key1: Pointer to key1
374 * @key2: Pointer to key2
375 *
376 * Return 1 if two futex_keys are equal, 0 otherwise.
377 */
378 static inline int match_futex(union futex_key *key1, union futex_key *key2)
379 {
380 return (key1 && key2
381 && key1->both.word == key2->both.word
382 && key1->both.ptr == key2->both.ptr
383 && key1->both.offset == key2->both.offset);
384 }
385
386 enum futex_access {
387 FUTEX_READ,
388 FUTEX_WRITE
389 };
390
391 /**
392 * futex_setup_timer - set up the sleeping hrtimer.
393 * @time: ptr to the given timeout value
394 * @timeout: the hrtimer_sleeper structure to be set up
395 * @flags: futex flags
396 * @range_ns: optional range in ns
397 *
398 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
399 * value given
400 */
401 static inline struct hrtimer_sleeper *
402 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
403 int flags, u64 range_ns)
404 {
405 if (!time)
406 return NULL;
407
408 hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
409 CLOCK_REALTIME : CLOCK_MONOTONIC,
410 HRTIMER_MODE_ABS);
411 /*
412 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
413 * effectively the same as calling hrtimer_set_expires().
414 */
415 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
416
417 return timeout;
418 }
419
420 /*
421 * Generate a machine wide unique identifier for this inode.
422 *
423 * This relies on u64 not wrapping in the life-time of the machine; which with
424 * 1ns resolution means almost 585 years.
425 *
426 * This further relies on the fact that a well formed program will not unmap
427 * the file while it has a (shared) futex waiting on it. This mapping will have
428 * a file reference which pins the mount and inode.
429 *
430 * If for some reason an inode gets evicted and read back in again, it will get
431 * a new sequence number and will _NOT_ match, even though it is the exact same
432 * file.
433 *
434 * It is important that match_futex() will never have a false-positive, esp.
435 * for PI futexes that can mess up the state. The above argues that false-negatives
436 * are only possible for malformed programs.
437 */
438 static u64 get_inode_sequence_number(struct inode *inode)
439 {
440 static atomic64_t i_seq;
441 u64 old;
442
443 /* Does the inode already have a sequence number? */
444 old = atomic64_read(&inode->i_sequence);
445 if (likely(old))
446 return old;
447
448 for (;;) {
449 u64 new = atomic64_add_return(1, &i_seq);
450 if (WARN_ON_ONCE(!new))
451 continue;
452
453 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
454 if (old)
455 return old;
456 return new;
457 }
458 }
459
460 /**
461 * get_futex_key() - Get parameters which are the keys for a futex
462 * @uaddr: virtual address of the futex
463 * @fshared: false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
464 * @key: address where result is stored.
465 * @rw: mapping needs to be read/write (values: FUTEX_READ,
466 * FUTEX_WRITE)
467 *
468 * Return: a negative error code or 0
469 *
470 * The key words are stored in @key on success.
471 *
472 * For shared mappings (when @fshared), the key is:
473 *
474 * ( inode->i_sequence, page->index, offset_within_page )
475 *
476 * [ also see get_inode_sequence_number() ]
477 *
478 * For private mappings (or when !@fshared), the key is:
479 *
480 * ( current->mm, address, 0 )
481 *
482 * This allows (cross process, where applicable) identification of the futex
483 * without keeping the page pinned for the duration of the FUTEX_WAIT.
484 *
485 * lock_page() might sleep, the caller should not hold a spinlock.
486 */
487 static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
488 enum futex_access rw)
489 {
490 unsigned long address = (unsigned long)uaddr;
491 struct mm_struct *mm = current->mm;
492 struct page *page, *tail;
493 struct address_space *mapping;
494 int err, ro = 0;
495
496 /*
497 * The futex address must be "naturally" aligned.
498 */
499 key->both.offset = address % PAGE_SIZE;
500 if (unlikely((address % sizeof(u32)) != 0))
501 return -EINVAL;
502 address -= key->both.offset;
503
504 if (unlikely(!access_ok(uaddr, sizeof(u32))))
505 return -EFAULT;
506
507 if (unlikely(should_fail_futex(fshared)))
508 return -EFAULT;
509
510 /*
511 * PROCESS_PRIVATE futexes are fast.
512 * As the mm cannot disappear under us and the 'key' only needs the
513 * virtual address, we don't even have to find the underlying vma.
514 * Note: We do have to check that 'uaddr' is a valid user address,
515 * but access_ok() should be faster than find_vma().
516 */
517 if (!fshared) {
518 /*
519 * On no-MMU, shared futexes are treated as private, therefore
520 * we must not include the current process in the key. Since
521 * there is only one address space, the address is a unique key
522 * on its own.
523 */
524 if (IS_ENABLED(CONFIG_MMU))
525 key->private.mm = mm;
526 else
527 key->private.mm = NULL;
528
529 key->private.address = address;
530 return 0;
531 }
532
533 again:
534 /* Ignore any VERIFY_READ mapping (futex common case) */
535 if (unlikely(should_fail_futex(true)))
536 return -EFAULT;
537
538 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
539 /*
540 * If write access is not required (eg. FUTEX_WAIT), try
541 * and get read-only access.
542 */
543 if (err == -EFAULT && rw == FUTEX_READ) {
544 err = get_user_pages_fast(address, 1, 0, &page);
545 ro = 1;
546 }
547 if (err < 0)
548 return err;
549 else
550 err = 0;
551
552 /*
553 * The treatment of mapping from this point on is critical. The page
554 * lock protects many things but in this context the page lock
555 * stabilizes mapping, prevents inode freeing in the shared
556 * file-backed region case and guards against movement to swap cache.
557 *
558 * Strictly speaking the page lock is not needed in all cases being
559 * considered here and the page lock forces unnecessary serialization.
560 * From this point on, mapping will be re-verified if necessary and
561 * the page lock will be acquired only if it is unavoidable.
562 *
563 * Mapping checks require the head page for any compound page so the
564 * head page and mapping is looked up now. For anonymous pages, it
565 * does not matter if the page splits in the future as the key is
566 * based on the address. For filesystem-backed pages, the tail is
567 * required as the index of the page determines the key. For
568 * base pages, there is no tail page and tail == page.
569 */
570 tail = page;
571 page = compound_head(page);
572 mapping = READ_ONCE(page->mapping);
573
574 /*
575 * If page->mapping is NULL, then it cannot be a PageAnon
576 * page; but it might be the ZERO_PAGE or in the gate area or
577 * in a special mapping (all cases which we are happy to fail);
578 * or it may have been a good file page when get_user_pages_fast
579 * found it, but truncated or holepunched or subjected to
580 * invalidate_complete_page2 before we got the page lock (also
581 * cases which we are happy to fail). And we hold a reference,
582 * so refcount care in invalidate_complete_page's remove_mapping
583 * prevents drop_caches from setting mapping to NULL beneath us.
584 *
585 * The case we do have to guard against is when memory pressure made
586 * shmem_writepage move it from filecache to swapcache beneath us:
587 * an unlikely race, but we do need to retry for page->mapping.
588 */
589 if (unlikely(!mapping)) {
590 int shmem_swizzled;
591
592 /*
593 * Page lock is required to identify which special case above
594 * applies. If this is really a shmem page then the page lock
595 * will prevent unexpected transitions.
596 */
597 lock_page(page);
598 shmem_swizzled = PageSwapCache(page) || page->mapping;
599 unlock_page(page);
600 put_user_page(page);
601
602 if (shmem_swizzled)
603 goto again;
604
605 return -EFAULT;
606 }
607
608 /*
609 * Private mappings are handled in a simple way.
610 *
611 * If the futex key is stored on an anonymous page, then the associated
612 * object is the mm which is implicitly pinned by the calling process.
613 *
614 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
615 * it's a read-only handle, it's expected that futexes attach to
616 * the object not the particular process.
617 */
618 if (PageAnon(page)) {
619 /*
620 * A RO anonymous page will never change and thus doesn't make
621 * sense for futex operations.
622 */
623 if (unlikely(should_fail_futex(true)) || ro) {
624 err = -EFAULT;
625 goto out;
626 }
627
628 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
629 key->private.mm = mm;
630 key->private.address = address;
631
632 } else {
633 struct inode *inode;
634
635 /*
636 * The associated futex object in this case is the inode and
637 * the page->mapping must be traversed. Ordinarily this should
638 * be stabilised under page lock but it's not strictly
639 * necessary in this case as we just want to pin the inode, not
640 * update the radix tree or anything like that.
641 *
642 * The RCU read lock is taken as the inode is finally freed
643 * under RCU. If the mapping still matches expectations then the
644 * mapping->host can be safely accessed as being a valid inode.
645 */
646 rcu_read_lock();
647
648 if (READ_ONCE(page->mapping) != mapping) {
649 rcu_read_unlock();
650 put_user_page(page);
651
652 goto again;
653 }
654
655 inode = READ_ONCE(mapping->host);
656 if (!inode) {
657 rcu_read_unlock();
658 put_user_page(page);
659
660 goto again;
661 }
662
663 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
664 key->shared.i_seq = get_inode_sequence_number(inode);
665 key->shared.pgoff = page_to_pgoff(tail);
666 rcu_read_unlock();
667 }
668
669 out:
670 put_user_page(page);
671 return err;
672 }
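/*
 * Worked example of the key composition above (illustrative, with made-up
 * addresses): two processes that mmap() the same file MAP_SHARED and use a
 * futex at byte offset 0x80 of page 3 of that file end up with the same key,
 * regardless of where the mapping landed in each address space:
 *
 *	key->both.offset   = 0x80 | FUT_OFF_INODE;
 *	key->shared.i_seq  = get_inode_sequence_number(inode);
 *	key->shared.pgoff  = 3;
 *
 * A private futex at virtual address 0x7f0000001080 in those same processes
 * instead yields two distinct keys, one per mm:
 *
 *	key->both.offset     = 0x80;
 *	key->private.mm      = current->mm;
 *	key->private.address = 0x7f0000001000;
 */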
673
674 /**
675 * fault_in_user_writeable() - Fault in user address and verify RW access
676 * @uaddr: pointer to faulting user space address
677 *
678 * Slow path to fixup the fault we just took in the atomic write
679 * access to @uaddr.
680 *
681 * We have no generic implementation of a non-destructive write to the
682 * user address. We know that we faulted in the atomic pagefault
683 * disabled section so we can as well avoid the #PF overhead by
684 * calling get_user_pages() right away.
685 */
686 static int fault_in_user_writeable(u32 __user *uaddr)
687 {
688 struct mm_struct *mm = current->mm;
689 int ret;
690
691 mmap_read_lock(mm);
692 ret = fixup_user_fault(mm, (unsigned long)uaddr,
693 FAULT_FLAG_WRITE, NULL);
694 mmap_read_unlock(mm);
695
696 return ret < 0 ? ret : 0;
697 }
698
699 /**
700 * futex_top_waiter() - Return the highest priority waiter on a futex
701 * @hb: the hash bucket the futex_q's reside in
702 * @key: the futex key (to distinguish it from other futex futex_q's)
703 *
704 * Must be called with the hb lock held.
705 */
706 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
707 union futex_key *key)
708 {
709 struct futex_q *this;
710
711 plist_for_each_entry(this, &hb->chain, list) {
712 if (match_futex(&this->key, key))
713 return this;
714 }
715 return NULL;
716 }
717
718 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
719 u32 uval, u32 newval)
720 {
721 int ret;
722
723 pagefault_disable();
724 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
725 pagefault_enable();
726
727 return ret;
728 }
729
730 static int get_futex_value_locked(u32 *dest, u32 __user *from)
731 {
732 int ret;
733
734 pagefault_disable();
735 ret = __get_user(*dest, from);
736 pagefault_enable();
737
738 return ret ? -EFAULT : 0;
739 }
740
741
742 /*
743 * PI code:
744 */
745 static int refill_pi_state_cache(void)
746 {
747 struct futex_pi_state *pi_state;
748
749 if (likely(current->pi_state_cache))
750 return 0;
751
752 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
753
754 if (!pi_state)
755 return -ENOMEM;
756
757 INIT_LIST_HEAD(&pi_state->list);
758 /* pi_mutex gets initialized later */
759 pi_state->owner = NULL;
760 refcount_set(&pi_state->refcount, 1);
761 pi_state->key = FUTEX_KEY_INIT;
762
763 current->pi_state_cache = pi_state;
764
765 return 0;
766 }
767
768 static struct futex_pi_state *alloc_pi_state(void)
769 {
770 struct futex_pi_state *pi_state = current->pi_state_cache;
771
772 WARN_ON(!pi_state);
773 current->pi_state_cache = NULL;
774
775 return pi_state;
776 }
777
778 static void pi_state_update_owner(struct futex_pi_state *pi_state,
779 struct task_struct *new_owner)
780 {
781 struct task_struct *old_owner = pi_state->owner;
782
783 lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
784
785 if (old_owner) {
786 raw_spin_lock(&old_owner->pi_lock);
787 WARN_ON(list_empty(&pi_state->list));
788 list_del_init(&pi_state->list);
789 raw_spin_unlock(&old_owner->pi_lock);
790 }
791
792 if (new_owner) {
793 raw_spin_lock(&new_owner->pi_lock);
794 WARN_ON(!list_empty(&pi_state->list));
795 list_add(&pi_state->list, &new_owner->pi_state_list);
796 pi_state->owner = new_owner;
797 raw_spin_unlock(&new_owner->pi_lock);
798 }
799 }
800
801 static void get_pi_state(struct futex_pi_state *pi_state)
802 {
803 WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
804 }
805
806 /*
807 * Drops a reference to the pi_state object and frees or caches it
808 * when the last reference is gone.
809 */
810 static void put_pi_state(struct futex_pi_state *pi_state)
811 {
812 if (!pi_state)
813 return;
814
815 if (!refcount_dec_and_test(&pi_state->refcount))
816 return;
817
818 /*
819 * If pi_state->owner is NULL, the owner is most probably dying
820 * and has cleaned up the pi_state already
821 */
822 if (pi_state->owner) {
823 unsigned long flags;
824
825 raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
826 pi_state_update_owner(pi_state, NULL);
827 rt_mutex_proxy_unlock(&pi_state->pi_mutex);
828 raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
829 }
830
831 if (current->pi_state_cache) {
832 kfree(pi_state);
833 } else {
834 /*
835 * pi_state->list is already empty.
836 * clear pi_state->owner.
837 * refcount is at 0 - put it back to 1.
838 */
839 pi_state->owner = NULL;
840 refcount_set(&pi_state->refcount, 1);
841 current->pi_state_cache = pi_state;
842 }
843 }
844
845 #ifdef CONFIG_FUTEX_PI
846
847 /*
848 * This task is holding PI mutexes at exit time => bad.
849 * Kernel cleans up PI-state, but userspace is likely hosed.
850 * (Robust-futex cleanup is separate and might save the day for userspace.)
851 */
852 static void exit_pi_state_list(struct task_struct *curr)
853 {
854 struct list_head *next, *head = &curr->pi_state_list;
855 struct futex_pi_state *pi_state;
856 struct futex_hash_bucket *hb;
857 union futex_key key = FUTEX_KEY_INIT;
858
859 if (!futex_cmpxchg_enabled)
860 return;
861 /*
862 * We are a ZOMBIE and nobody can enqueue itself on
863 * pi_state_list anymore, but we have to be careful
864 * versus waiters unqueueing themselves:
865 */
866 raw_spin_lock_irq(&curr->pi_lock);
867 while (!list_empty(head)) {
868 next = head->next;
869 pi_state = list_entry(next, struct futex_pi_state, list);
870 key = pi_state->key;
871 hb = hash_futex(&key);
872
873 /*
874 * We can race against put_pi_state() removing itself from the
875 * list (a waiter going away). put_pi_state() will first
876 * decrement the reference count and then modify the list, so
877 * it's possible to see the list entry but fail this reference
878 * acquire.
879 *
880 * In that case; drop the locks to let put_pi_state() make
881 * progress and retry the loop.
882 */
883 if (!refcount_inc_not_zero(&pi_state->refcount)) {
884 raw_spin_unlock_irq(&curr->pi_lock);
885 cpu_relax();
886 raw_spin_lock_irq(&curr->pi_lock);
887 continue;
888 }
889 raw_spin_unlock_irq(&curr->pi_lock);
890
891 spin_lock(&hb->lock);
892 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
893 raw_spin_lock(&curr->pi_lock);
894 /*
895 * We dropped the pi-lock, so re-check whether this
896 * task still owns the PI-state:
897 */
898 if (head->next != next) {
899 /* retain curr->pi_lock for the loop invariant */
900 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
901 spin_unlock(&hb->lock);
902 put_pi_state(pi_state);
903 continue;
904 }
905
906 WARN_ON(pi_state->owner != curr);
907 WARN_ON(list_empty(&pi_state->list));
908 list_del_init(&pi_state->list);
909 pi_state->owner = NULL;
910
911 raw_spin_unlock(&curr->pi_lock);
912 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
913 spin_unlock(&hb->lock);
914
915 rt_mutex_futex_unlock(&pi_state->pi_mutex);
916 put_pi_state(pi_state);
917
918 raw_spin_lock_irq(&curr->pi_lock);
919 }
920 raw_spin_unlock_irq(&curr->pi_lock);
921 }
922 #else
923 static inline void exit_pi_state_list(struct task_struct *curr) { }
924 #endif
925
926 /*
927 * We need to check the following states:
928 *
929 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
930 *
931 * [1] NULL | --- | --- | 0 | 0/1 | Valid
932 * [2] NULL | --- | --- | >0 | 0/1 | Valid
933 *
934 * [3] Found | NULL | -- | Any | 0/1 | Invalid
935 *
936 * [4] Found | Found | NULL | 0 | 1 | Valid
937 * [5] Found | Found | NULL | >0 | 1 | Invalid
938 *
939 * [6] Found | Found | task | 0 | 1 | Valid
940 *
941 * [7] Found | Found | NULL | Any | 0 | Invalid
942 *
943 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
944 * [9] Found | Found | task | 0 | 0 | Invalid
945 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
946 *
947 * [1] Indicates that the kernel can acquire the futex atomically. We
948 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
949 *
950 * [2] Valid, if TID does not belong to a kernel thread. If no matching
951 * thread is found then it indicates that the owner TID has died.
952 *
953 * [3] Invalid. The waiter is queued on a non PI futex
954 *
955 * [4] Valid state after exit_robust_list(), which sets the user space
956 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
957 *
958 * [5] The user space value got manipulated between exit_robust_list()
959 * and exit_pi_state_list()
960 *
961 * [6] Valid state after exit_pi_state_list() which sets the new owner in
962 * the pi_state but cannot access the user space value.
963 *
964 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
965 *
966 * [8] Owner and user space value match
967 *
968 * [9] There is no transient state which sets the user space TID to 0
969 * except exit_robust_list(), but this is indicated by the
970 * FUTEX_OWNER_DIED bit. See [4]
971 *
972 * [10] There is no transient state which leaves owner and user space
973 * TID out of sync. Except one error case where the kernel is denied
974 * write access to the user address, see fixup_pi_state_owner().
975 *
976 *
977 * Serialization and lifetime rules:
978 *
979 * hb->lock:
980 *
981 * hb -> futex_q, relation
982 * futex_q -> pi_state, relation
983 *
984 * (cannot be raw because hb can contain an arbitrary number
985 * of futex_q's)
986 *
987 * pi_mutex->wait_lock:
988 *
989 * {uval, pi_state}
990 *
991 * (and pi_mutex 'obviously')
992 *
993 * p->pi_lock:
994 *
995 * p->pi_state_list -> pi_state->list, relation
996 *
997 * pi_state->refcount:
998 *
999 * pi_state lifetime
1000 *
1001 *
1002 * Lock order:
1003 *
1004 * hb->lock
1005 * pi_mutex->wait_lock
1006 * p->pi_lock
1007 *
1008 */
1009
1010 /*
1011 * Validate that the existing waiter has a pi_state and sanity check
1012 * the pi_state against the user space value. If correct, attach to
1013 * it.
1014 */
1015 static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1016 struct futex_pi_state *pi_state,
1017 struct futex_pi_state **ps)
1018 {
1019 pid_t pid = uval & FUTEX_TID_MASK;
1020 u32 uval2;
1021 int ret;
1022
1023 /*
1024 * Userspace might have messed up non-PI and PI futexes [3]
1025 */
1026 if (unlikely(!pi_state))
1027 return -EINVAL;
1028
1029 /*
1030 * We get here with hb->lock held, and having found a
1031 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1032 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1033 * which in turn means that futex_lock_pi() still has a reference on
1034 * our pi_state.
1035 *
1036 * The waiter holding a reference on @pi_state also protects against
1037 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1038 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1039 * free pi_state before we can take a reference ourselves.
1040 */
1041 WARN_ON(!refcount_read(&pi_state->refcount));
1042
1043 /*
1044 * Now that we have a pi_state, we can acquire wait_lock
1045 * and do the state validation.
1046 */
1047 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1048
1049 /*
1050 * Since {uval, pi_state} is serialized by wait_lock, and our current
1051 * uval was read without holding it, it can have changed. Verify it
1052 * still is what we expect it to be, otherwise retry the entire
1053 * operation.
1054 */
1055 if (get_futex_value_locked(&uval2, uaddr))
1056 goto out_efault;
1057
1058 if (uval != uval2)
1059 goto out_eagain;
1060
1061 /*
1062 * Handle the owner died case:
1063 */
1064 if (uval & FUTEX_OWNER_DIED) {
1065 /*
1066 * exit_pi_state_list sets owner to NULL and wakes the
1067 * topmost waiter. The task which acquires the
1068 * pi_state->rt_mutex will fixup owner.
1069 */
1070 if (!pi_state->owner) {
1071 /*
1072 * No pi state owner, but the user space TID
1073 * is not 0. Inconsistent state. [5]
1074 */
1075 if (pid)
1076 goto out_einval;
1077 /*
1078 * Take a ref on the state and return success. [4]
1079 */
1080 goto out_attach;
1081 }
1082
1083 /*
1084 * If TID is 0, then either the dying owner has not
1085 * yet executed exit_pi_state_list() or some waiter
1086 * acquired the rtmutex in the pi state, but did not
1087 * yet fixup the TID in user space.
1088 *
1089 * Take a ref on the state and return success. [6]
1090 */
1091 if (!pid)
1092 goto out_attach;
1093 } else {
1094 /*
1095 * If the owner died bit is not set, then the pi_state
1096 * must have an owner. [7]
1097 */
1098 if (!pi_state->owner)
1099 goto out_einval;
1100 }
1101
1102 /*
1103 * Bail out if user space manipulated the futex value. If pi
1104 * state exists then the owner TID must be the same as the
1105 * user space TID. [9/10]
1106 */
1107 if (pid != task_pid_vnr(pi_state->owner))
1108 goto out_einval;
1109
1110 out_attach:
1111 get_pi_state(pi_state);
1112 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1113 *ps = pi_state;
1114 return 0;
1115
1116 out_einval:
1117 ret = -EINVAL;
1118 goto out_error;
1119
1120 out_eagain:
1121 ret = -EAGAIN;
1122 goto out_error;
1123
1124 out_efault:
1125 ret = -EFAULT;
1126 goto out_error;
1127
1128 out_error:
1129 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1130 return ret;
1131 }
1132
1133 /**
1134 * wait_for_owner_exiting - Block until the owner has exited
1135 * @ret: owner's current futex lock status
1136 * @exiting: Pointer to the exiting task
1137 *
1138 * Caller must hold a refcount on @exiting.
1139 */
1140 static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
1141 {
1142 if (ret != -EBUSY) {
1143 WARN_ON_ONCE(exiting);
1144 return;
1145 }
1146
1147 if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
1148 return;
1149
1150 mutex_lock(&exiting->futex_exit_mutex);
1151 /*
1152 * No point in doing state checking here. If the waiter got here
1153 * while the task was in exec()->exec_futex_release() then it can
1154 * have any FUTEX_STATE_* value when the waiter has acquired the
1155 * mutex. OK, if running, EXITING or DEAD if it reached exit()
1156 * already. Highly unlikely and not a problem. Just one more round
1157 * through the futex maze.
1158 */
1159 mutex_unlock(&exiting->futex_exit_mutex);
1160
1161 put_task_struct(exiting);
1162 }
1163
1164 static int handle_exit_race(u32 __user *uaddr, u32 uval,
1165 struct task_struct *tsk)
1166 {
1167 u32 uval2;
1168
1169 /*
1170 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
1171 * caller that the alleged owner is busy.
1172 */
1173 if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
1174 return -EBUSY;
1175
1176 /*
1177 * Reread the user space value to handle the following situation:
1178 *
1179 * CPU0 CPU1
1180 *
1181 * sys_exit() sys_futex()
1182 * do_exit() futex_lock_pi()
1183 * futex_lock_pi_atomic()
1184 * exit_signals(tsk) No waiters:
1185 * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1186 * mm_release(tsk) Set waiter bit
1187 * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1188 * Set owner died attach_to_pi_owner() {
1189 * *uaddr = 0xC0000000; tsk = get_task(PID);
1190 * } if (!tsk->flags & PF_EXITING) {
1191 * ... attach();
1192 * tsk->futex_state = } else {
1193 * FUTEX_STATE_DEAD; if (tsk->futex_state !=
1194 * FUTEX_STATE_DEAD)
1195 * return -EAGAIN;
1196 * return -ESRCH; <--- FAIL
1197 * }
1198 *
1199 * Returning ESRCH unconditionally is wrong here because the
1200 * user space value has been changed by the exiting task.
1201 *
1202 * The same logic applies to the case where the exiting task is
1203 * already gone.
1204 */
1205 if (get_futex_value_locked(&uval2, uaddr))
1206 return -EFAULT;
1207
1208 /* If the user space value has changed, try again. */
1209 if (uval2 != uval)
1210 return -EAGAIN;
1211
1212 /*
1213 * The exiting task did not have a robust list, the robust list was
1214 * corrupted or the user space value in *uaddr is simply bogus.
1215 * Give up and tell user space.
1216 */
1217 return -ESRCH;
1218 }
1219
1220 /*
1221 * Lookup the task for the TID provided from user space and attach to
1222 * it after doing proper sanity checks.
1223 */
1224 static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1225 struct futex_pi_state **ps,
1226 struct task_struct **exiting)
1227 {
1228 pid_t pid = uval & FUTEX_TID_MASK;
1229 struct futex_pi_state *pi_state;
1230 struct task_struct *p;
1231
1232 /*
1233 * We are the first waiter - try to look up the real owner and attach
1234 * the new pi_state to it, but bail out when TID = 0 [1]
1235 *
1236 * The !pid check is paranoid. None of the call sites should end up
1237 * with pid == 0, but better safe than sorry. Let the caller retry.
1238 */
1239 if (!pid)
1240 return -EAGAIN;
1241 p = find_get_task_by_vpid(pid);
1242 if (!p)
1243 return handle_exit_race(uaddr, uval, NULL);
1244
1245 if (unlikely(p->flags & PF_KTHREAD)) {
1246 put_task_struct(p);
1247 return -EPERM;
1248 }
1249
1250 /*
1251 * We need to look at the task state to figure out, whether the
1252 * task is exiting. To protect against the change of the task state
1253 * in futex_exit_release(), we do this protected by p->pi_lock:
1254 */
1255 raw_spin_lock_irq(&p->pi_lock);
1256 if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
1257 /*
1258 * The task is on the way out. When the futex state is
1259 * FUTEX_STATE_DEAD, we know that the task has finished
1260 * the cleanup:
1261 */
1262 int ret = handle_exit_race(uaddr, uval, p);
1263
1264 raw_spin_unlock_irq(&p->pi_lock);
1265 /*
1266 * If the owner task is between FUTEX_STATE_EXITING and
1267 * FUTEX_STATE_DEAD then store the task pointer and keep
1268 * the reference on the task struct. The calling code will
1269 * drop all locks, wait for the task to reach
1270 * FUTEX_STATE_DEAD and then drop the refcount. This is
1271 * required to prevent a live lock when the current task
1272 * preempted the exiting task between the two states.
1273 */
1274 if (ret == -EBUSY)
1275 *exiting = p;
1276 else
1277 put_task_struct(p);
1278 return ret;
1279 }
1280
1281 /*
1282 * No existing pi state. First waiter. [2]
1283 *
1284 * This creates pi_state, we have hb->lock held, this means nothing can
1285 * observe this state, wait_lock is irrelevant.
1286 */
1287 pi_state = alloc_pi_state();
1288
1289 /*
1290 * Initialize the pi_mutex in locked state and make @p
1291 * the owner of it:
1292 */
1293 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1294
1295 /* Store the key for possible exit cleanups: */
1296 pi_state->key = *key;
1297
1298 WARN_ON(!list_empty(&pi_state->list));
1299 list_add(&pi_state->list, &p->pi_state_list);
1300 /*
1301 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1302 * because there is no concurrency as the object is not published yet.
1303 */
1304 pi_state->owner = p;
1305 raw_spin_unlock_irq(&p->pi_lock);
1306
1307 put_task_struct(p);
1308
1309 *ps = pi_state;
1310
1311 return 0;
1312 }
1313
1314 static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1315 struct futex_hash_bucket *hb,
1316 union futex_key *key, struct futex_pi_state **ps,
1317 struct task_struct **exiting)
1318 {
1319 struct futex_q *top_waiter = futex_top_waiter(hb, key);
1320
1321 /*
1322 * If there is a waiter on that futex, validate it and
1323 * attach to the pi_state when the validation succeeds.
1324 */
1325 if (top_waiter)
1326 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1327
1328 /*
1329 * We are the first waiter - try to look up the owner based on
1330 * @uval and attach to it.
1331 */
1332 return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
1333 }
1334
1335 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1336 {
1337 int err;
1338 u32 curval;
1339
1340 if (unlikely(should_fail_futex(true)))
1341 return -EFAULT;
1342
1343 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1344 if (unlikely(err))
1345 return err;
1346
1347 /* If user space value changed, let the caller retry */
1348 return curval != uval ? -EAGAIN : 0;
1349 }
1350
1351 /**
1352 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1353 * @uaddr: the pi futex user address
1354 * @hb: the pi futex hash bucket
1355 * @key: the futex key associated with uaddr and hb
1356 * @ps: the pi_state pointer where we store the result of the
1357 * lookup
1358 * @task: the task to perform the atomic lock work for. This will
1359 * be "current" except in the case of requeue pi.
1360 * @exiting: Pointer to store the task pointer of the owner task
1361 * which is in the middle of exiting
1362 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1363 *
1364 * Return:
1365 * - 0 - ready to wait;
1366 * - 1 - acquired the lock;
1367 * - <0 - error
1368 *
1369 * The hb->lock and futex_key refs shall be held by the caller.
1370 *
1371 * @exiting is only set when the return value is -EBUSY. If so, this holds
1372 * a refcount on the exiting task on return and the caller needs to drop it
1373 * after waiting for the exit to complete.
1374 */
1375 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1376 union futex_key *key,
1377 struct futex_pi_state **ps,
1378 struct task_struct *task,
1379 struct task_struct **exiting,
1380 int set_waiters)
1381 {
1382 u32 uval, newval, vpid = task_pid_vnr(task);
1383 struct futex_q *top_waiter;
1384 int ret;
1385
1386 /*
1387 * Read the user space value first so we can validate a few
1388 * things before proceeding further.
1389 */
1390 if (get_futex_value_locked(&uval, uaddr))
1391 return -EFAULT;
1392
1393 if (unlikely(should_fail_futex(true)))
1394 return -EFAULT;
1395
1396 /*
1397 * Detect deadlocks.
1398 */
1399 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1400 return -EDEADLK;
1401
1402 if ((unlikely(should_fail_futex(true))))
1403 return -EDEADLK;
1404
1405 /*
1406 * Lookup existing state first. If it exists, try to attach to
1407 * its pi_state.
1408 */
1409 top_waiter = futex_top_waiter(hb, key);
1410 if (top_waiter)
1411 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1412
1413 /*
1414 * No waiter and user TID is 0. We are here because the
1415 * waiters bit or the owner died bit is set, we were called
1416 * from requeue_cmp_pi, or for whatever other reason something
1417 * took the syscall.
1418 */
1419 if (!(uval & FUTEX_TID_MASK)) {
1420 /*
1421 * We take over the futex. No other waiters and the user space
1422 * TID is 0. We preserve the owner died bit.
1423 */
1424 newval = uval & FUTEX_OWNER_DIED;
1425 newval |= vpid;
1426
1427 /* The futex requeue_pi code can enforce the waiters bit */
1428 if (set_waiters)
1429 newval |= FUTEX_WAITERS;
1430
1431 ret = lock_pi_update_atomic(uaddr, uval, newval);
1432 /* If the take over worked, return 1 */
1433 return ret < 0 ? ret : 1;
1434 }
1435
1436 /*
1437 * First waiter. Set the waiters bit before attaching ourself to
1438 * the owner. If owner tries to unlock, it will be forced into
1439 * the kernel and blocked on hb->lock.
1440 */
1441 newval = uval | FUTEX_WAITERS;
1442 ret = lock_pi_update_atomic(uaddr, uval, newval);
1443 if (ret)
1444 return ret;
1445 /*
1446 * If the update of the user space value succeeded, we try to
1447 * attach to the owner. If that fails, no harm done, we only
1448 * set the FUTEX_WAITERS bit in the user space variable.
1449 */
1450 return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
1451 }
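/*
 * Illustrative userspace sketch (not part of this file) of the PI futex
 * protocol whose kernel side futex_lock_pi_atomic() implements, assuming the
 * documented futex(2) interface; pi_lock()/pi_unlock() are made-up names:
 *
 *	// Fast path: an uncontended lock/unlock never enters the kernel.
 *	void pi_lock(uint32_t *futex)
 *	{
 *		if (!__sync_bool_compare_and_swap(futex, 0, gettid()))
 *			// Contended: the kernel sets FUTEX_WAITERS and
 *			// attaches to or creates the pi_state, as above.
 *			syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 *
 *	void pi_unlock(uint32_t *futex)
 *	{
 *		if (!__sync_bool_compare_and_swap(futex, gettid(), 0))
 *			// FUTEX_WAITERS (and/or FUTEX_OWNER_DIED) is set:
 *			// hand the lock to the top waiter in the kernel.
 *			syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */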
1452
1453 /**
1454 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1455 * @q: The futex_q to unqueue
1456 *
1457 * The q->lock_ptr must not be NULL and must be held by the caller.
1458 */
1459 static void __unqueue_futex(struct futex_q *q)
1460 {
1461 struct futex_hash_bucket *hb;
1462
1463 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1464 return;
1465 lockdep_assert_held(q->lock_ptr);
1466
1467 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1468 plist_del(&q->list, &hb->chain);
1469 hb_waiters_dec(hb);
1470 }
1471
1472 /*
1473 * The hash bucket lock must be held when this is called.
1474 * Afterwards, the futex_q must not be accessed. Callers
1475 * must ensure to later call wake_up_q() for the actual
1476 * wakeups to occur.
1477 */
1478 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1479 {
1480 struct task_struct *p = q->task;
1481
1482 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1483 return;
1484
1485 get_task_struct(p);
1486 __unqueue_futex(q);
1487 /*
1488 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1489 * is written, without taking any locks. This is possible in the event
1490 * of a spurious wakeup, for example. A memory barrier is required here
1491 * to prevent the following store to lock_ptr from getting ahead of the
1492 * plist_del in __unqueue_futex().
1493 */
1494 smp_store_release(&q->lock_ptr, NULL);
1495
1496 /*
1497 * Queue the task for later wakeup for after we've released
1498 * the hb->lock.
1499 */
1500 wake_q_add_safe(wake_q, p);
1501 }
1502
1503 /*
1504 * Caller must hold a reference on @pi_state.
1505 */
1506 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1507 {
1508 u32 curval, newval;
1509 struct task_struct *new_owner;
1510 bool postunlock = false;
1511 DEFINE_WAKE_Q(wake_q);
1512 int ret = 0;
1513
1514 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1515 if (WARN_ON_ONCE(!new_owner)) {
1516 /*
1517 * As per the comment in futex_unlock_pi() this should not happen.
1518 *
1519 * When this happens, give up our locks and try again, giving
1520 * the futex_lock_pi() instance time to complete, either by
1521 * waiting on the rtmutex or removing itself from the futex
1522 * queue.
1523 */
1524 ret = -EAGAIN;
1525 goto out_unlock;
1526 }
1527
1528 /*
1529 * We pass it to the next owner. The WAITERS bit is always kept
1530 * enabled while there is PI state around. We cleanup the owner
1531 * died bit, because we are the owner.
1532 */
1533 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1534
1535 if (unlikely(should_fail_futex(true))) {
1536 ret = -EFAULT;
1537 goto out_unlock;
1538 }
1539
1540 ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1541 if (!ret && (curval != uval)) {
1542 /*
1543 * If an unconditional UNLOCK_PI operation (user space did not
1544 * try the TID->0 transition) raced with a waiter setting the
1545 * FUTEX_WAITERS flag between get_user() and locking the hash
1546 * bucket lock, retry the operation.
1547 */
1548 if ((FUTEX_TID_MASK & curval) == uval)
1549 ret = -EAGAIN;
1550 else
1551 ret = -EINVAL;
1552 }
1553
1554 if (!ret) {
1555 /*
1556 * This is a point of no return; once we modified the uval
1557 * there is no going back and subsequent operations must
1558 * not fail.
1559 */
1560 pi_state_update_owner(pi_state, new_owner);
1561 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1562 }
1563
1564 out_unlock:
1565 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1566
1567 if (postunlock)
1568 rt_mutex_postunlock(&wake_q);
1569
1570 return ret;
1571 }
1572
1573 /*
1574 * Express the locking dependencies for lockdep:
1575 */
1576 static inline void
1577 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1578 {
1579 if (hb1 <= hb2) {
1580 spin_lock(&hb1->lock);
1581 if (hb1 < hb2)
1582 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1583 } else { /* hb1 > hb2 */
1584 spin_lock(&hb2->lock);
1585 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1586 }
1587 }
1588
1589 static inline void
1590 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1591 {
1592 spin_unlock(&hb1->lock);
1593 if (hb1 != hb2)
1594 spin_unlock(&hb2->lock);
1595 }
1596
1597 /*
1598 * Wake up waiters matching bitset queued on this futex (uaddr).
1599 */
1600 static int
1601 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1602 {
1603 struct futex_hash_bucket *hb;
1604 struct futex_q *this, *next;
1605 union futex_key key = FUTEX_KEY_INIT;
1606 int ret;
1607 int target_nr;
1608 DEFINE_WAKE_Q(wake_q);
1609
1610 if (!bitset)
1611 return -EINVAL;
1612
1613 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
1614 if (unlikely(ret != 0))
1615 return ret;
1616
1617 hb = hash_futex(&key);
1618
1619 /* Make sure we really have tasks to wakeup */
1620 if (!hb_waiters_pending(hb))
1621 return ret;
1622
1623 spin_lock(&hb->lock);
1624
1625 trace_android_vh_futex_wake_traverse_plist(&hb->chain, &target_nr, key, bitset);
1626 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1627 if (match_futex (&this->key, &key)) {
1628 if (this->pi_state || this->rt_waiter) {
1629 ret = -EINVAL;
1630 break;
1631 }
1632
1633 /* Check if one of the bits is set in both bitsets */
1634 if (!(this->bitset & bitset))
1635 continue;
1636
1637 trace_android_vh_futex_wake_this(ret, nr_wake, target_nr, this->task);
1638 mark_wake_futex(&wake_q, this);
1639 if (++ret >= nr_wake)
1640 break;
1641 }
1642 }
1643
1644 spin_unlock(&hb->lock);
1645 wake_up_q(&wake_q);
1646 trace_android_vh_futex_wake_up_q_finish(nr_wake, target_nr);
1647 return ret;
1648 }
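/*
 * Illustrative userspace sketch (not part of this file): the bitset is the
 * val3 argument of FUTEX_WAIT_BITSET/FUTEX_WAKE_BITSET and lets a waker
 * target a subset of the waiters queued on a single futex word. Hypothetical
 * example with two waiter classes:
 *
 *	#define CLASS_A	0x1
 *	#define CLASS_B	0x2
 *
 *	// Waiter of class A (val is the expected futex value):
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT_BITSET, val,
 *		NULL, NULL, CLASS_A);
 *
 *	// Wake up to INT_MAX waiters, but only those whose bitset
 *	// intersects CLASS_B (futex_wake() checks this->bitset & bitset):
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE_BITSET, INT_MAX,
 *		NULL, NULL, CLASS_B);
 *
 * A plain FUTEX_WAIT/FUTEX_WAKE behaves as if FUTEX_BITSET_MATCH_ANY
 * (all bits set) had been passed as the bitset.
 */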
1649
1650 static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1651 {
1652 unsigned int op = (encoded_op & 0x70000000) >> 28;
1653 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1654 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1655 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1656 int oldval, ret;
1657
1658 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1659 if (oparg < 0 || oparg > 31) {
1660 char comm[sizeof(current->comm)];
1661 /*
1662 * kill this print and return -EINVAL when userspace
1663 * is sane again
1664 */
1665 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1666 get_task_comm(comm, current), oparg);
1667 oparg &= 31;
1668 }
1669 oparg = 1 << oparg;
1670 }
1671
1672 pagefault_disable();
1673 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1674 pagefault_enable();
1675 if (ret)
1676 return ret;
1677
1678 switch (cmp) {
1679 case FUTEX_OP_CMP_EQ:
1680 return oldval == cmparg;
1681 case FUTEX_OP_CMP_NE:
1682 return oldval != cmparg;
1683 case FUTEX_OP_CMP_LT:
1684 return oldval < cmparg;
1685 case FUTEX_OP_CMP_GE:
1686 return oldval >= cmparg;
1687 case FUTEX_OP_CMP_LE:
1688 return oldval <= cmparg;
1689 case FUTEX_OP_CMP_GT:
1690 return oldval > cmparg;
1691 default:
1692 return -ENOSYS;
1693 }
1694 }
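/*
 * Worked example of the encoded_op layout decoded above (bit 31 is the
 * FUTEX_OP_OPARG_SHIFT flag, bits 30-28 the op, bits 27-24 the cmp,
 * bits 23-12 oparg, bits 11-0 cmparg). Userspace typically builds it with
 * the FUTEX_OP() macro from <linux/futex.h>, e.g.:
 *
 *	encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
 *
 * which makes futex_wake_op() atomically perform on the second futex:
 *
 *	oldval = *uaddr2;
 *	*uaddr2 = oldval + 1;		// op = FUTEX_OP_ADD, oparg = 1
 *
 * and wake the waiters queued on uaddr2 only if (oldval > 0), i.e.
 * cmp = FUTEX_OP_CMP_GT, cmparg = 0.
 */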
1695
1696 /*
1697 * Wake up all waiters hashed on the physical page that is mapped
1698 * to this virtual address:
1699 */
1700 static int
1701 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1702 int nr_wake, int nr_wake2, int op)
1703 {
1704 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1705 struct futex_hash_bucket *hb1, *hb2;
1706 struct futex_q *this, *next;
1707 int ret, op_ret;
1708 DEFINE_WAKE_Q(wake_q);
1709
1710 retry:
1711 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1712 if (unlikely(ret != 0))
1713 return ret;
1714 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1715 if (unlikely(ret != 0))
1716 return ret;
1717
1718 hb1 = hash_futex(&key1);
1719 hb2 = hash_futex(&key2);
1720
1721 retry_private:
1722 double_lock_hb(hb1, hb2);
1723 op_ret = futex_atomic_op_inuser(op, uaddr2);
1724 if (unlikely(op_ret < 0)) {
1725 double_unlock_hb(hb1, hb2);
1726
1727 if (!IS_ENABLED(CONFIG_MMU) ||
1728 unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1729 /*
1730 * we don't get EFAULT from MMU faults if we don't have
1731 * an MMU, but we might get them from range checking
1732 */
1733 ret = op_ret;
1734 return ret;
1735 }
1736
1737 if (op_ret == -EFAULT) {
1738 ret = fault_in_user_writeable(uaddr2);
1739 if (ret)
1740 return ret;
1741 }
1742
1743 if (!(flags & FLAGS_SHARED)) {
1744 cond_resched();
1745 goto retry_private;
1746 }
1747
1748 cond_resched();
1749 goto retry;
1750 }
1751
1752 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1753 if (match_futex (&this->key, &key1)) {
1754 if (this->pi_state || this->rt_waiter) {
1755 ret = -EINVAL;
1756 goto out_unlock;
1757 }
1758 mark_wake_futex(&wake_q, this);
1759 if (++ret >= nr_wake)
1760 break;
1761 }
1762 }
1763
1764 if (op_ret > 0) {
1765 op_ret = 0;
1766 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1767 if (match_futex (&this->key, &key2)) {
1768 if (this->pi_state || this->rt_waiter) {
1769 ret = -EINVAL;
1770 goto out_unlock;
1771 }
1772 mark_wake_futex(&wake_q, this);
1773 if (++op_ret >= nr_wake2)
1774 break;
1775 }
1776 }
1777 ret += op_ret;
1778 }
1779
1780 out_unlock:
1781 double_unlock_hb(hb1, hb2);
1782 wake_up_q(&wake_q);
1783 return ret;
1784 }
1785
1786 /**
1787 * requeue_futex() - Requeue a futex_q from one hb to another
1788 * @q: the futex_q to requeue
1789 * @hb1: the source hash_bucket
1790 * @hb2: the target hash_bucket
1791 * @key2: the new key for the requeued futex_q
1792 */
1793 static inline
1794 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1795 struct futex_hash_bucket *hb2, union futex_key *key2)
1796 {
1797
1798 /*
1799 * If key1 and key2 hash to the same bucket, no need to
1800 * requeue.
1801 */
1802 if (likely(&hb1->chain != &hb2->chain)) {
1803 plist_del(&q->list, &hb1->chain);
1804 hb_waiters_dec(hb1);
1805 hb_waiters_inc(hb2);
1806 plist_add(&q->list, &hb2->chain);
1807 q->lock_ptr = &hb2->lock;
1808 }
1809 q->key = *key2;
1810 }
1811
1812 /**
1813 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1814 * @q: the futex_q
1815 * @key: the key of the requeue target futex
1816 * @hb: the hash_bucket of the requeue target futex
1817 *
1818 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1819 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1820 * to the requeue target futex so the waiter can detect the wakeup on the right
1821 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1822 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1823 * to protect access to the pi_state to fixup the owner later. Must be called
1824 * with both q->lock_ptr and hb->lock held.
1825 */
1826 static inline
1827 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1828 struct futex_hash_bucket *hb)
1829 {
1830 q->key = *key;
1831
1832 __unqueue_futex(q);
1833
1834 WARN_ON(!q->rt_waiter);
1835 q->rt_waiter = NULL;
1836
1837 q->lock_ptr = &hb->lock;
1838
1839 wake_up_state(q->task, TASK_NORMAL);
1840 }
1841
1842 /**
1843 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1844 * @pifutex: the user address of the to futex
1845 * @hb1: the from futex hash bucket, must be locked by the caller
1846 * @hb2: the to futex hash bucket, must be locked by the caller
1847 * @key1: the from futex key
1848 * @key2: the to futex key
1849 * @ps: address to store the pi_state pointer
1850 * @exiting: Pointer to store the task pointer of the owner task
1851 * which is in the middle of exiting
1852 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1853 *
1854 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1855 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1856 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1857 * hb1 and hb2 must be held by the caller.
1858 *
1859 * @exiting is only set when the return value is -EBUSY. If so, this holds
1860 * a refcount on the exiting task on return and the caller needs to drop it
1861 * after waiting for the exit to complete.
1862 *
1863 * Return:
1864 * - 0 - failed to acquire the lock atomically;
1865 * - >0 - acquired the lock, return value is vpid of the top_waiter
1866 * - <0 - error
1867 */
1868 static int
1869 futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
1870 struct futex_hash_bucket *hb2, union futex_key *key1,
1871 union futex_key *key2, struct futex_pi_state **ps,
1872 struct task_struct **exiting, int set_waiters)
1873 {
1874 struct futex_q *top_waiter = NULL;
1875 u32 curval;
1876 int ret, vpid;
1877
1878 if (get_futex_value_locked(&curval, pifutex))
1879 return -EFAULT;
1880
1881 if (unlikely(should_fail_futex(true)))
1882 return -EFAULT;
1883
1884 /*
1885 * Find the top_waiter and determine if there are additional waiters.
1886 * If the caller intends to requeue more than 1 waiter to pifutex,
1887 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1888 * as we have means to handle the possible fault. If not, don't set
1889 * the bit unnecessarily as it will force the subsequent unlock to enter
1890 * the kernel.
1891 */
1892 top_waiter = futex_top_waiter(hb1, key1);
1893
1894 /* There are no waiters, nothing for us to do. */
1895 if (!top_waiter)
1896 return 0;
1897
1898 /* Ensure we requeue to the expected futex. */
1899 if (!match_futex(top_waiter->requeue_pi_key, key2))
1900 return -EINVAL;
1901
1902 /*
1903 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1904 * the contended case or if set_waiters is 1. The pi_state is returned
1905 * in ps in contended cases.
1906 */
1907 vpid = task_pid_vnr(top_waiter->task);
1908 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1909 exiting, set_waiters);
1910 if (ret == 1) {
1911 requeue_pi_wake_futex(top_waiter, key2, hb2);
1912 return vpid;
1913 }
1914 return ret;
1915 }
1916
1917 /**
1918 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1919 * @uaddr1: source futex user address
1920 * @flags: futex flags (FLAGS_SHARED, etc.)
1921 * @uaddr2: target futex user address
1922 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1923 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1924 * @cmpval: @uaddr1 expected value (or %NULL)
1925 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1926 * pi futex (pi to pi requeue is not supported)
1927 *
1928 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1929 * uaddr2 atomically on behalf of the top waiter.
1930 *
1931 * Return:
1932 * - >=0 - on success, the number of tasks requeued or woken;
1933 * - <0 - on error
1934 */
1935 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1936 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1937 u32 *cmpval, int requeue_pi)
1938 {
1939 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1940 int task_count = 0, ret;
1941 struct futex_pi_state *pi_state = NULL;
1942 struct futex_hash_bucket *hb1, *hb2;
1943 struct futex_q *this, *next;
1944 DEFINE_WAKE_Q(wake_q);
1945
1946 if (nr_wake < 0 || nr_requeue < 0)
1947 return -EINVAL;
1948
1949 /*
1950 * When PI not supported: return -ENOSYS if requeue_pi is true,
1951 * consequently the compiler knows requeue_pi is always false past
1952 * this point which will optimize away all the conditional code
1953 * further down.
1954 */
1955 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1956 return -ENOSYS;
1957
1958 if (requeue_pi) {
1959 /*
1960 * Requeue PI only works on two distinct uaddrs. This
1961 * check is only valid for private futexes. See below.
1962 */
1963 if (uaddr1 == uaddr2)
1964 return -EINVAL;
1965
1966 /*
1967 * requeue_pi requires a pi_state, try to allocate it now
1968 * without any locks in case it fails.
1969 */
1970 if (refill_pi_state_cache())
1971 return -ENOMEM;
1972 /*
1973 * requeue_pi must wake as many tasks as it can, up to nr_wake
1974 * + nr_requeue, since it acquires the rt_mutex prior to
1975 * returning to userspace, so as to not leave the rt_mutex with
1976 * waiters and no owner. However, second and third wake-ups
1977 * cannot be predicted as they involve race conditions with the
1978 * first wake and a fault while looking up the pi_state. Both
1979 * pthread_cond_signal() and pthread_cond_broadcast() should
1980 * use nr_wake=1.
1981 */
1982 if (nr_wake != 1)
1983 return -EINVAL;
1984 }
1985
1986 retry:
1987 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1988 if (unlikely(ret != 0))
1989 return ret;
1990 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1991 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
1992 if (unlikely(ret != 0))
1993 return ret;
1994
1995 /*
1996 * The check above which compares uaddrs is not sufficient for
1997 * shared futexes. We need to compare the keys:
1998 */
1999 if (requeue_pi && match_futex(&key1, &key2))
2000 return -EINVAL;
2001
2002 hb1 = hash_futex(&key1);
2003 hb2 = hash_futex(&key2);
2004
2005 retry_private:
2006 hb_waiters_inc(hb2);
2007 double_lock_hb(hb1, hb2);
2008
2009 if (likely(cmpval != NULL)) {
2010 u32 curval;
2011
2012 ret = get_futex_value_locked(&curval, uaddr1);
2013
2014 if (unlikely(ret)) {
2015 double_unlock_hb(hb1, hb2);
2016 hb_waiters_dec(hb2);
2017
2018 ret = get_user(curval, uaddr1);
2019 if (ret)
2020 return ret;
2021
2022 if (!(flags & FLAGS_SHARED))
2023 goto retry_private;
2024
2025 goto retry;
2026 }
2027 if (curval != *cmpval) {
2028 ret = -EAGAIN;
2029 goto out_unlock;
2030 }
2031 }
2032
2033 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2034 struct task_struct *exiting = NULL;
2035
2036 /*
2037 * Attempt to acquire uaddr2 and wake the top waiter. If we
2038 * intend to requeue waiters, force setting the FUTEX_WAITERS
2039 * bit. We force this here where we are able to easily handle
2040 * faults rather than in the requeue loop below.
2041 */
2042 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2043 &key2, &pi_state,
2044 &exiting, nr_requeue);
2045
2046 /*
2047 * At this point the top_waiter has either taken uaddr2 or is
2048 * waiting on it. If the former, then the pi_state will not
2049 * exist yet, look it up one more time to ensure we have a
2050 * reference to it. If the lock was taken, ret contains the
2051 * vpid of the top waiter task.
2052 * If the lock was not taken, we have pi_state and an initial
2053 * refcount on it. In case of an error we have nothing.
2054 */
2055 if (ret > 0) {
2056 WARN_ON(pi_state);
2057 task_count++;
2058 /*
2059 * If we acquired the lock, then the user space value
2060 * of uaddr2 should be vpid. It cannot be changed by
2061 * the top waiter as it is blocked on hb2 lock if it
2062 * tries to do so. If something fiddled with it behind
2063 * our back the pi state lookup might unearth it. So
2064 * we rather use the known value than rereading and
2065 * handing potential crap to lookup_pi_state.
2066 *
2067 * If that call succeeds then we have pi_state and an
2068 * initial refcount on it.
2069 */
2070 ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
2071 &pi_state, &exiting);
2072 }
2073
2074 switch (ret) {
2075 case 0:
2076 /* We hold a reference on the pi state. */
2077 break;
2078
2079 /* If the above failed, then pi_state is NULL */
2080 case -EFAULT:
2081 double_unlock_hb(hb1, hb2);
2082 hb_waiters_dec(hb2);
2083 ret = fault_in_user_writeable(uaddr2);
2084 if (!ret)
2085 goto retry;
2086 return ret;
2087 case -EBUSY:
2088 case -EAGAIN:
2089 /*
2090 * Two reasons for this:
2091 * - EBUSY: Owner is exiting and we just wait for the
2092 * exit to complete.
2093 * - EAGAIN: The user space value changed.
2094 */
2095 double_unlock_hb(hb1, hb2);
2096 hb_waiters_dec(hb2);
2097 /*
2098 * Handle the case where the owner is in the middle of
2099 * exiting. Wait for the exit to complete otherwise
2100 * this task might loop forever, aka. live lock.
2101 */
2102 wait_for_owner_exiting(ret, exiting);
2103 cond_resched();
2104 goto retry;
2105 default:
2106 goto out_unlock;
2107 }
2108 }
2109
2110 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2111 if (task_count - nr_wake >= nr_requeue)
2112 break;
2113
2114 if (!match_futex(&this->key, &key1))
2115 continue;
2116
2117 /*
2118 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2119 * be paired with each other and no other futex ops.
2120 *
2121 * We should never be requeueing a futex_q with a pi_state,
2122 * which is awaiting a futex_unlock_pi().
2123 */
2124 if ((requeue_pi && !this->rt_waiter) ||
2125 (!requeue_pi && this->rt_waiter) ||
2126 this->pi_state) {
2127 ret = -EINVAL;
2128 break;
2129 }
2130
2131 /*
2132 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2133 * lock, we already woke the top_waiter. If not, it will be
2134 * woken by futex_unlock_pi().
2135 */
2136 if (++task_count <= nr_wake && !requeue_pi) {
2137 mark_wake_futex(&wake_q, this);
2138 continue;
2139 }
2140
2141 /* Ensure we requeue to the expected futex for requeue_pi. */
2142 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2143 ret = -EINVAL;
2144 break;
2145 }
2146
2147 /*
2148 * Requeue nr_requeue waiters and possibly one more in the case
2149 * of requeue_pi if we couldn't acquire the lock atomically.
2150 */
2151 if (requeue_pi) {
2152 /*
2153 * Prepare the waiter to take the rt_mutex. Take a
2154 * refcount on the pi_state and store the pointer in
2155 * the futex_q object of the waiter.
2156 */
2157 get_pi_state(pi_state);
2158 this->pi_state = pi_state;
2159 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2160 this->rt_waiter,
2161 this->task);
2162 if (ret == 1) {
2163 /*
2164 * We got the lock. We neither drop the
2165 * refcount on pi_state nor clear
2166 * this->pi_state because the waiter needs the
2167 * pi_state for cleaning up the user space
2168 * value. It will drop the refcount after
2169 * doing so.
2170 */
2171 requeue_pi_wake_futex(this, &key2, hb2);
2172 continue;
2173 } else if (ret) {
2174 /*
2175 * rt_mutex_start_proxy_lock() detected a
2176 * potential deadlock when we tried to queue
2177 * that waiter. Drop the pi_state reference
2178 * which we took above and remove the pointer
2179 * to the state from the waiter's futex_q
2180 * object.
2181 */
2182 this->pi_state = NULL;
2183 put_pi_state(pi_state);
2184 /*
2185 * We stop queueing more waiters and let user
2186 * space deal with the mess.
2187 */
2188 break;
2189 }
2190 }
2191 requeue_futex(this, hb1, hb2, &key2);
2192 }
2193
2194 /*
2195 * We took an extra initial reference to the pi_state either
2196 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2197 * need to drop it here again.
2198 */
2199 put_pi_state(pi_state);
2200
2201 out_unlock:
2202 double_unlock_hb(hb1, hb2);
2203 wake_up_q(&wake_q);
2204 hb_waiters_dec(hb2);
2205 return ret ? ret : task_count;
2206 }
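
/*
* Illustrative sketch, not kernel code: the non-PI path above implements
* FUTEX_CMP_REQUEUE, which a condvar-style broadcast can use to wake one
* waiter and requeue the remaining ones onto the mutex futex instead of
* waking them all. Names below are made up; only the ABI layout is real:
*
*	#include <limits.h>
*	#include <linux/futex.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	// cond_val is the value the caller read from *cond before deciding
*	// to broadcast; the kernel fails with EAGAIN if *cond has changed.
*	static long broadcast_example(unsigned int *cond, unsigned int *mutex,
*				      unsigned int cond_val)
*	{
*		// args: uaddr1, op, nr_wake, nr_requeue (timeout slot),
*		// uaddr2, cmpval
*		return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
*			       (unsigned long)INT_MAX, mutex, cond_val);
*	}
*/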
2207
2208 /* The key must be already stored in q->key. */
2209 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2210 __acquires(&hb->lock)
2211 {
2212 struct futex_hash_bucket *hb;
2213
2214 hb = hash_futex(&q->key);
2215
2216 /*
2217 * Increment the counter before taking the lock so that
2218 * a potential waker won't miss a to-be-slept task that is
2219 * waiting for the spinlock. This is safe as all queue_lock()
2220 * users end up calling queue_me(). Similarly, for housekeeping,
2221 * decrement the counter at queue_unlock() when some error has
2222 * occurred and we don't end up adding the task to the list.
2223 */
2224 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2225
2226 q->lock_ptr = &hb->lock;
2227
2228 spin_lock(&hb->lock);
2229 return hb;
2230 }
2231
2232 static inline void
2233 queue_unlock(struct futex_hash_bucket *hb)
2234 __releases(&hb->lock)
2235 {
2236 spin_unlock(&hb->lock);
2237 hb_waiters_dec(hb);
2238 }
2239
2240 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2241 {
2242 int prio;
2243 bool already_on_hb = false;
2244
2245 /*
2246 * The priority used to register this element is
2247 * - either the real thread-priority for the real-time threads
2248 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2249 * - or MAX_RT_PRIO for non-RT threads.
2250 * Thus, all RT-threads are woken first in priority order, and
2251 * the others are woken last, in FIFO order.
2252 */
2253 prio = min(current->normal_prio, MAX_RT_PRIO);
2254
2255 plist_node_init(&q->list, prio);
2256 trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb);
2257 if (!already_on_hb)
2258 plist_add(&q->list, &hb->chain);
2259 q->task = current;
2260 }
2261
2262 /**
2263 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2264 * @q: The futex_q to enqueue
2265 * @hb: The destination hash bucket
2266 *
2267 * The hb->lock must be held by the caller, and is released here. A call to
2268 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2269 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2270 * or nothing if the unqueue is done as part of the wake process and the unqueue
2271 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2272 * an example).
2273 */
2274 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2275 __releases(&hb->lock)
2276 {
2277 __queue_me(q, hb);
2278 spin_unlock(&hb->lock);
2279 }
2280
2281 /**
2282 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2283 * @q: The futex_q to unqueue
2284 *
2285 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2286 * be paired with exactly one earlier call to queue_me().
2287 *
2288 * Return:
2289 * - 1 - if the futex_q was still queued (and we unqueued it);
2290 * - 0 - if the futex_q was already removed by the waking thread
2291 */
2292 static int unqueue_me(struct futex_q *q)
2293 {
2294 spinlock_t *lock_ptr;
2295 int ret = 0;
2296
2297 /* In the common case we don't take the spinlock, which is nice. */
2298 retry:
2299 /*
2300 * q->lock_ptr can change between this read and the following spin_lock.
2301 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2302 * optimizing lock_ptr out of the logic below.
2303 */
2304 lock_ptr = READ_ONCE(q->lock_ptr);
2305 if (lock_ptr != NULL) {
2306 spin_lock(lock_ptr);
2307 /*
2308 * q->lock_ptr can change between reading it and
2309 * spin_lock(), causing us to take the wrong lock. This
2310 * corrects the race condition.
2311 *
2312 * Reasoning goes like this: if we have the wrong lock,
2313 * q->lock_ptr must have changed (maybe several times)
2314 * between reading it and the spin_lock(). It can
2315 * change again after the spin_lock() but only if it was
2316 * already changed before the spin_lock(). It cannot,
2317 * however, change back to the original value. Therefore
2318 * we can detect whether we acquired the correct lock.
2319 */
2320 if (unlikely(lock_ptr != q->lock_ptr)) {
2321 spin_unlock(lock_ptr);
2322 goto retry;
2323 }
2324 __unqueue_futex(q);
2325
2326 BUG_ON(q->pi_state);
2327
2328 spin_unlock(lock_ptr);
2329 ret = 1;
2330 }
2331
2332 return ret;
2333 }
2334
2335 /*
2336 * PI futexes cannot be requeued and must remove themselves from the
2337 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2338 * and dropped here.
2339 */
2340 static void unqueue_me_pi(struct futex_q *q)
2341 __releases(q->lock_ptr)
2342 {
2343 __unqueue_futex(q);
2344
2345 BUG_ON(!q->pi_state);
2346 put_pi_state(q->pi_state);
2347 q->pi_state = NULL;
2348
2349 spin_unlock(q->lock_ptr);
2350 }
2351
2352 static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2353 struct task_struct *argowner)
2354 {
2355 struct futex_pi_state *pi_state = q->pi_state;
2356 struct task_struct *oldowner, *newowner;
2357 u32 uval, curval, newval, newtid;
2358 int err = 0;
2359
2360 oldowner = pi_state->owner;
2361
2362 /*
2363 * We are here because either:
2364 *
2365 * - we stole the lock and pi_state->owner needs updating to reflect
2366 * that (@argowner == current),
2367 *
2368 * or:
2369 *
2370 * - someone stole our lock and we need to fix things to point to the
2371 * new owner (@argowner == NULL).
2372 *
2373 * Either way, we have to replace the TID in the user space variable.
2374 * This must be atomic as we have to preserve the owner died bit here.
2375 *
2376 * Note: We write the user space value _before_ changing the pi_state
2377 * because we can fault here. Imagine swapped out pages or a fork
2378 * that marked all the anonymous memory readonly for cow.
2379 *
2380 * Modifying pi_state _before_ the user space value would leave the
2381 * pi_state in an inconsistent state when we fault here, because we
2382 * need to drop the locks to handle the fault. This might be observed
2383 * in the PID check in lookup_pi_state.
2384 */
2385 retry:
2386 if (!argowner) {
2387 if (oldowner != current) {
2388 /*
2389 * We raced against a concurrent self; things are
2390 * already fixed up. Nothing to do.
2391 */
2392 return 0;
2393 }
2394
2395 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2396 /* We got the lock. pi_state is correct. Tell caller. */
2397 return 1;
2398 }
2399
2400 /*
2401 * The trylock just failed, so either there is an owner or
2402 * there is a higher priority waiter than this one.
2403 */
2404 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2405 /*
2406 * If the higher priority waiter has not yet taken over the
2407 * rtmutex then newowner is NULL. We can't return here with
2408 * that state because it's inconsistent vs. the user space
2409 * state. So drop the locks and try again. It's a valid
2410 * situation and not any different from the other retry
2411 * conditions.
2412 */
2413 if (unlikely(!newowner)) {
2414 err = -EAGAIN;
2415 goto handle_err;
2416 }
2417 } else {
2418 WARN_ON_ONCE(argowner != current);
2419 if (oldowner == current) {
2420 /*
2421 * We raced against a concurrent self; things are
2422 * already fixed up. Nothing to do.
2423 */
2424 return 1;
2425 }
2426 newowner = argowner;
2427 }
2428
2429 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2430 /* Owner died? */
2431 if (!pi_state->owner)
2432 newtid |= FUTEX_OWNER_DIED;
2433
2434 err = get_futex_value_locked(&uval, uaddr);
2435 if (err)
2436 goto handle_err;
2437
2438 for (;;) {
2439 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2440
2441 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2442 if (err)
2443 goto handle_err;
2444
2445 if (curval == uval)
2446 break;
2447 uval = curval;
2448 }
2449
2450 /*
2451 * We fixed up user space. Now we need to fix the pi_state
2452 * itself.
2453 */
2454 pi_state_update_owner(pi_state, newowner);
2455
2456 return argowner == current;
2457
2458 /*
2459 * In order to reschedule or handle a page fault, we need to drop the
2460 * locks here. In the case of a fault, this gives the other task
2461 * (either the highest priority waiter itself or the task which stole
2462 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2463 * are back from handling the fault we need to check the pi_state after
2464 * reacquiring the locks and before trying to do another fixup. When
2465 * the fixup has been done already we simply return.
2466 *
2467 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2468 * drop hb->lock since the caller owns the hb -> futex_q relation.
2469 * Dropping the pi_mutex->wait_lock requires the state revalidate.
2470 */
2471 handle_err:
2472 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2473 spin_unlock(q->lock_ptr);
2474
2475 switch (err) {
2476 case -EFAULT:
2477 err = fault_in_user_writeable(uaddr);
2478 break;
2479
2480 case -EAGAIN:
2481 cond_resched();
2482 err = 0;
2483 break;
2484
2485 default:
2486 WARN_ON_ONCE(1);
2487 break;
2488 }
2489
2490 spin_lock(q->lock_ptr);
2491 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2492
2493 /*
2494 * Check if someone else fixed it for us:
2495 */
2496 if (pi_state->owner != oldowner)
2497 return argowner == current;
2498
2499 /* Retry if err was -EAGAIN or the fault in succeeded */
2500 if (!err)
2501 goto retry;
2502
2503 /*
2504 * fault_in_user_writeable() failed so user state is immutable. At
2505 * best we can make the kernel state consistent but user state will
2506 * be most likely hosed and any subsequent unlock operation will be
2507 * rejected due to PI futex rule [10].
2508 *
2509 * Ensure that the rtmutex owner is also the pi_state owner despite
2510 * the user space value claiming something different. There is no
2511 * point in unlocking the rtmutex if current is the owner as it
2512 * would need to wait until the next waiter has taken the rtmutex
2513 * to guarantee consistent state. Keep it simple. Userspace asked
2514 * for this wrecked state.
2515 *
2516 * The rtmutex has an owner - either current or some other
2517 * task. See the EAGAIN loop above.
2518 */
2519 pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
2520
2521 return err;
2522 }
2523
2524 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2525 struct task_struct *argowner)
2526 {
2527 struct futex_pi_state *pi_state = q->pi_state;
2528 int ret;
2529
2530 lockdep_assert_held(q->lock_ptr);
2531
2532 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2533 ret = __fixup_pi_state_owner(uaddr, q, argowner);
2534 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2535 return ret;
2536 }
2537
2538 static long futex_wait_restart(struct restart_block *restart);
2539
2540 /**
2541 * fixup_owner() - Post lock pi_state and corner case management
2542 * @uaddr: user address of the futex
2543 * @q: futex_q (contains pi_state and access to the rt_mutex)
2544 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2545 *
2546 * After attempting to lock an rt_mutex, this function is called to cleanup
2547 * the pi_state owner as well as handle race conditions that may allow us to
2548 * acquire the lock. Must be called with the hb lock held.
2549 *
2550 * Return:
2551 * - 1 - success, lock taken;
2552 * - 0 - success, lock not taken;
2553 * - <0 - on error (-EFAULT)
2554 */
2555 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2556 {
2557 if (locked) {
2558 /*
2559 * Got the lock. We might not be the anticipated owner if we
2560 * did a lock-steal - fix up the PI-state in that case:
2561 *
2562 * Speculative pi_state->owner read (we don't hold wait_lock);
2563 * since we own the lock pi_state->owner == current is the
2564 * stable state, anything else needs more attention.
2565 */
2566 if (q->pi_state->owner != current)
2567 return fixup_pi_state_owner(uaddr, q, current);
2568 return 1;
2569 }
2570
2571 /*
2572 * If we didn't get the lock; check if anybody stole it from us. In
2573 * that case, we need to fix up the uval to point to them instead of
2574 * us, otherwise bad things happen. [10]
2575 *
2576 * Another speculative read; pi_state->owner == current is unstable
2577 * but needs our attention.
2578 */
2579 if (q->pi_state->owner == current)
2580 return fixup_pi_state_owner(uaddr, q, NULL);
2581
2582 /*
2583 * Paranoia check. If we did not take the lock, then we should not be
2584 * the owner of the rt_mutex. Warn and establish consistent state.
2585 */
2586 if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
2587 return fixup_pi_state_owner(uaddr, q, current);
2588
2589 return 0;
2590 }
2591
2592 /**
2593 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2594 * @hb: the futex hash bucket, must be locked by the caller
2595 * @q: the futex_q to queue up on
2596 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2597 */
2598 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2599 struct hrtimer_sleeper *timeout)
2600 {
2601 /*
2602 * The task state is guaranteed to be set before another task can
2603 * wake it. set_current_state() is implemented using smp_store_mb() and
2604 * queue_me() calls spin_unlock() upon completion, both serializing
2605 * access to the hash list and forcing another memory barrier.
2606 */
2607 set_current_state(TASK_INTERRUPTIBLE);
2608 queue_me(q, hb);
2609
2610 /* Arm the timer */
2611 if (timeout)
2612 hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2613
2614 /*
2615 * If we have been removed from the hash list, then another task
2616 * has tried to wake us, and we can skip the call to schedule().
2617 */
2618 if (likely(!plist_node_empty(&q->list))) {
2619 /*
2620 * If the timer has already expired, current will already be
2621 * flagged for rescheduling. Only call schedule if there
2622 * is no timeout, or if it has yet to expire.
2623 */
2624 if (!timeout || timeout->task) {
2625 trace_android_vh_futex_sleep_start(current);
2626 freezable_schedule();
2627 }
2628 }
2629 __set_current_state(TASK_RUNNING);
2630 }
2631
2632 /**
2633 * futex_wait_setup() - Prepare to wait on a futex
2634 * @uaddr: the futex userspace address
2635 * @val: the expected value
2636 * @flags: futex flags (FLAGS_SHARED, etc.)
2637 * @q: the associated futex_q
2638 * @hb: storage for hash_bucket pointer to be returned to caller
2639 *
2640 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2641 * compare it with the expected value. Handle atomic faults internally.
2642 * Return with the hb lock held and a q.key reference on success, and unlocked
2643 * with no q.key reference on failure.
2644 *
2645 * Return:
2646 * - 0 - uaddr contains val and hb has been locked;
2647 * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2648 */
2649 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2650 struct futex_q *q, struct futex_hash_bucket **hb)
2651 {
2652 u32 uval;
2653 int ret;
2654
2655 /*
2656 * Access the page AFTER the hash-bucket is locked.
2657 * Order is important:
2658 *
2659 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2660 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2661 *
2662 * The basic logical guarantee of a futex is that it blocks ONLY
2663 * if cond(var) is known to be true at the time of blocking, for
2664 * any cond. If we locked the hash-bucket after testing *uaddr, that
2665 * would open a race condition where we could block indefinitely with
2666 * cond(var) false, which would violate the guarantee.
2667 *
2668 * On the other hand, we insert q and release the hash-bucket only
2669 * after testing *uaddr. This guarantees that futex_wait() will NOT
2670 * absorb a wakeup if *uaddr does not match the desired values
2671 * while the syscall executes.
2672 */
2673 retry:
2674 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2675 if (unlikely(ret != 0))
2676 return ret;
2677
2678 retry_private:
2679 *hb = queue_lock(q);
2680
2681 ret = get_futex_value_locked(&uval, uaddr);
2682
2683 if (ret) {
2684 queue_unlock(*hb);
2685
2686 ret = get_user(uval, uaddr);
2687 if (ret)
2688 return ret;
2689
2690 if (!(flags & FLAGS_SHARED))
2691 goto retry_private;
2692
2693 goto retry;
2694 }
2695
2696 if (uval != val) {
2697 queue_unlock(*hb);
2698 ret = -EWOULDBLOCK;
2699 }
2700
2701 return ret;
2702 }
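
/*
* Illustrative sketch, not kernel code: the ordering argument above is what
* makes the canonical user space wait loop safe. A minimal event wait,
* assuming C11 atomics and not taken from any particular libc, could look
* like this:
*
*	#include <linux/futex.h>
*	#include <stdatomic.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	static void wait_for_nonzero(_Atomic unsigned int *word)
*	{
*		unsigned int val;
*
*		while ((val = atomic_load(word)) == 0) {
*			// Blocks only while *word still contains val;
*			// otherwise returns with EAGAIN and we re-check.
*			syscall(SYS_futex, word, FUTEX_WAIT, val, NULL, NULL, 0);
*		}
*	}
*
*	static void set_and_wake(_Atomic unsigned int *word)
*	{
*		atomic_store(word, 1);
*		syscall(SYS_futex, word, FUTEX_WAKE, 1, NULL, NULL, 0);
*	}
*/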
2703
2704 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2705 ktime_t *abs_time, u32 bitset)
2706 {
2707 struct hrtimer_sleeper timeout, *to;
2708 struct restart_block *restart;
2709 struct futex_hash_bucket *hb;
2710 struct futex_q q = futex_q_init;
2711 int ret;
2712
2713 if (!bitset)
2714 return -EINVAL;
2715 q.bitset = bitset;
2716 trace_android_vh_futex_wait_start(flags, bitset);
2717
2718 to = futex_setup_timer(abs_time, &timeout, flags,
2719 current->timer_slack_ns);
2720 retry:
2721 /*
2722 * Prepare to wait on uaddr. On success, holds hb lock and increments
2723 * q.key refs.
2724 */
2725 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2726 if (ret)
2727 goto out;
2728
2729 /* queue_me and wait for wakeup, timeout, or a signal. */
2730 futex_wait_queue_me(hb, &q, to);
2731
2732 /* If we were woken (and unqueued), we succeeded, whatever. */
2733 ret = 0;
2734 /* unqueue_me() drops q.key ref */
2735 if (!unqueue_me(&q))
2736 goto out;
2737 ret = -ETIMEDOUT;
2738 if (to && !to->task)
2739 goto out;
2740
2741 /*
2742 * We expect signal_pending(current), but we might be the
2743 * victim of a spurious wakeup as well.
2744 */
2745 if (!signal_pending(current))
2746 goto retry;
2747
2748 ret = -ERESTARTSYS;
2749 if (!abs_time)
2750 goto out;
2751
2752 restart = &current->restart_block;
2753 restart->futex.uaddr = uaddr;
2754 restart->futex.val = val;
2755 restart->futex.time = *abs_time;
2756 restart->futex.bitset = bitset;
2757 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2758
2759 ret = set_restart_fn(restart, futex_wait_restart);
2760
2761 out:
2762 if (to) {
2763 hrtimer_cancel(&to->timer);
2764 destroy_hrtimer_on_stack(&to->timer);
2765 }
2766 trace_android_vh_futex_wait_end(flags, bitset);
2767 return ret;
2768 }
2769
2770
2771 static long futex_wait_restart(struct restart_block *restart)
2772 {
2773 u32 __user *uaddr = restart->futex.uaddr;
2774 ktime_t t, *tp = NULL;
2775
2776 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2777 t = restart->futex.time;
2778 tp = &t;
2779 }
2780 restart->fn = do_no_restart_syscall;
2781
2782 return (long)futex_wait(uaddr, restart->futex.flags,
2783 restart->futex.val, tp, restart->futex.bitset);
2784 }
2785
2786
2787 /*
2788 * Userspace tried a 0 -> TID atomic transition of the futex value
2789 * and failed. The kernel side here does the whole locking operation:
2790 * if there are waiters then it will block as a consequence of relying
2791 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2792 * a 0 value of the futex too.).
2793 *
2794 * Also serves as the futex trylock_pi() operation, with the corresponding semantics.
2795 */
2796 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2797 ktime_t *time, int trylock)
2798 {
2799 struct hrtimer_sleeper timeout, *to;
2800 struct task_struct *exiting = NULL;
2801 struct rt_mutex_waiter rt_waiter;
2802 struct futex_hash_bucket *hb;
2803 struct futex_q q = futex_q_init;
2804 int res, ret;
2805
2806 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2807 return -ENOSYS;
2808
2809 if (refill_pi_state_cache())
2810 return -ENOMEM;
2811
2812 to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2813
2814 retry:
2815 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2816 if (unlikely(ret != 0))
2817 goto out;
2818
2819 retry_private:
2820 hb = queue_lock(&q);
2821
2822 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
2823 &exiting, 0);
2824 if (unlikely(ret)) {
2825 /*
2826 * Atomic work succeeded and we got the lock,
2827 * or failed. Either way, we do _not_ block.
2828 */
2829 switch (ret) {
2830 case 1:
2831 /* We got the lock. */
2832 ret = 0;
2833 goto out_unlock_put_key;
2834 case -EFAULT:
2835 goto uaddr_faulted;
2836 case -EBUSY:
2837 case -EAGAIN:
2838 /*
2839 * Two reasons for this:
2840 * - EBUSY: Task is exiting and we just wait for the
2841 * exit to complete.
2842 * - EAGAIN: The user space value changed.
2843 */
2844 queue_unlock(hb);
2845 /*
2846 * Handle the case where the owner is in the middle of
2847 * exiting. Wait for the exit to complete otherwise
2848 * this task might loop forever, aka. live lock.
2849 */
2850 wait_for_owner_exiting(ret, exiting);
2851 cond_resched();
2852 goto retry;
2853 default:
2854 goto out_unlock_put_key;
2855 }
2856 }
2857
2858 WARN_ON(!q.pi_state);
2859
2860 /*
2861 * Only actually queue now that the atomic ops are done:
2862 */
2863 __queue_me(&q, hb);
2864
2865 if (trylock) {
2866 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2867 /* Fixup the trylock return value: */
2868 ret = ret ? 0 : -EWOULDBLOCK;
2869 goto no_block;
2870 }
2871
2872 rt_mutex_init_waiter(&rt_waiter);
2873
2874 /*
2875 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2876 * hold it while doing rt_mutex_start_proxy(), because then it will
2877 * include hb->lock in the blocking chain, even though we'll not in
2878 * fact hold it while blocking. This will lead it to report -EDEADLK
2879 * and BUG when futex_unlock_pi() interleaves with this.
2880 *
2881 * Therefore acquire wait_lock while holding hb->lock, but drop the
2882 * latter before calling __rt_mutex_start_proxy_lock(). This
2883 * interleaves with futex_unlock_pi() -- which does a similar lock
2884 * handoff -- such that the latter can observe the futex_q::pi_state
2885 * before __rt_mutex_start_proxy_lock() is done.
2886 */
2887 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2888 spin_unlock(q.lock_ptr);
2889 /*
2890 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2891 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2892 * it sees the futex_q::pi_state.
2893 */
2894 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2895 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2896
2897 if (ret) {
2898 if (ret == 1)
2899 ret = 0;
2900 goto cleanup;
2901 }
2902
2903 if (unlikely(to))
2904 hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
2905
2906 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2907
2908 cleanup:
2909 spin_lock(q.lock_ptr);
2910 /*
2911 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2912 * first acquire the hb->lock before removing the lock from the
2913 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2914 * lists consistent.
2915 *
2916 * In particular; it is important that futex_unlock_pi() can not
2917 * observe this inconsistency.
2918 */
2919 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2920 ret = 0;
2921
2922 no_block:
2923 /*
2924 * Fixup the pi_state owner and possibly acquire the lock if we
2925 * haven't already.
2926 */
2927 res = fixup_owner(uaddr, &q, !ret);
2928 /*
2929 * If fixup_owner() returned an error, propagate that. If it acquired
2930 * the lock, clear our -ETIMEDOUT or -EINTR.
2931 */
2932 if (res)
2933 ret = (res < 0) ? res : 0;
2934
2935 /* Unqueue and drop the lock */
2936 unqueue_me_pi(&q);
2937 goto out;
2938
2939 out_unlock_put_key:
2940 queue_unlock(hb);
2941
2942 out:
2943 if (to) {
2944 hrtimer_cancel(&to->timer);
2945 destroy_hrtimer_on_stack(&to->timer);
2946 }
2947 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2948
2949 uaddr_faulted:
2950 queue_unlock(hb);
2951
2952 ret = fault_in_user_writeable(uaddr);
2953 if (ret)
2954 goto out;
2955
2956 if (!(flags & FLAGS_SHARED))
2957 goto retry_private;
2958
2959 goto retry;
2960 }
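
/*
* Illustrative sketch, not kernel code: the "0 -> TID" transition described
* above is attempted by user space first; only the contended case enters
* futex_lock_pi(). A minimal, assumed lock fast path (no error handling):
*
*	#include <linux/futex.h>
*	#include <stdatomic.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	static void pi_lock_example(_Atomic unsigned int *futex, unsigned int tid)
*	{
*		unsigned int expected = 0;
*
*		// Uncontended: take the lock entirely in user space.
*		if (atomic_compare_exchange_strong(futex, &expected, tid))
*			return;
*		// Contended: let the kernel set FUTEX_WAITERS, queue us on
*		// the rt_mutex and do the PI boosting.
*		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
*	}
*
* tid is assumed to be the caller's thread id as returned by gettid().
*/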
2961
2962 /*
2963 * Userspace attempted a TID -> 0 atomic transition, and failed.
2964 * This is the in-kernel slowpath: we look up the PI state (if any),
2965 * and do the rt-mutex unlock.
2966 */
2967 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2968 {
2969 u32 curval, uval, vpid = task_pid_vnr(current);
2970 union futex_key key = FUTEX_KEY_INIT;
2971 struct futex_hash_bucket *hb;
2972 struct futex_q *top_waiter;
2973 int ret;
2974
2975 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2976 return -ENOSYS;
2977
2978 retry:
2979 if (get_user(uval, uaddr))
2980 return -EFAULT;
2981 /*
2982 * We release only a lock we actually own:
2983 */
2984 if ((uval & FUTEX_TID_MASK) != vpid)
2985 return -EPERM;
2986
2987 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
2988 if (ret)
2989 return ret;
2990
2991 hb = hash_futex(&key);
2992 spin_lock(&hb->lock);
2993
2994 /*
2995 * Check waiters first. We do not trust user space values at
2996 * all and we at least want to know if user space fiddled
2997 * with the futex value instead of blindly unlocking.
2998 */
2999 top_waiter = futex_top_waiter(hb, &key);
3000 if (top_waiter) {
3001 struct futex_pi_state *pi_state = top_waiter->pi_state;
3002
3003 ret = -EINVAL;
3004 if (!pi_state)
3005 goto out_unlock;
3006
3007 /*
3008 * If current does not own the pi_state then the futex is
3009 * inconsistent and user space fiddled with the futex value.
3010 */
3011 if (pi_state->owner != current)
3012 goto out_unlock;
3013
3014 get_pi_state(pi_state);
3015 /*
3016 * By taking wait_lock while still holding hb->lock, we ensure
3017 * there is no point where we hold neither; and therefore
3018 * wake_futex_pi() must observe a state consistent with what we
3019 * observed.
3020 *
3021 * In particular; this forces __rt_mutex_start_proxy() to
3022 * complete such that we're guaranteed to observe the
3023 * rt_waiter. Also see the WARN in wake_futex_pi().
3024 */
3025 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3026 spin_unlock(&hb->lock);
3027
3028 /* drops pi_state->pi_mutex.wait_lock */
3029 ret = wake_futex_pi(uaddr, uval, pi_state);
3030
3031 put_pi_state(pi_state);
3032
3033 /*
3034 * Success, we're done! No tricky corner cases.
3035 */
3036 if (!ret)
3037 goto out_putkey;
3038 /*
3039 * The atomic access to the futex value generated a
3040 * pagefault, so retry the user-access and the wakeup:
3041 */
3042 if (ret == -EFAULT)
3043 goto pi_faulted;
3044 /*
3045 * An unconditional UNLOCK_PI op raced against a waiter
3046 * setting the FUTEX_WAITERS bit. Try again.
3047 */
3048 if (ret == -EAGAIN)
3049 goto pi_retry;
3050 /*
3051 * wake_futex_pi has detected invalid state. Tell user
3052 * space.
3053 */
3054 goto out_putkey;
3055 }
3056
3057 /*
3058 * We have no kernel internal state, i.e. no waiters in the
3059 * kernel. Waiters which are about to queue themselves are stuck
3060 * on hb->lock. So we can safely ignore them. We neither
3061 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
3062 * owner.
3063 */
3064 if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3065 spin_unlock(&hb->lock);
3066 switch (ret) {
3067 case -EFAULT:
3068 goto pi_faulted;
3069
3070 case -EAGAIN:
3071 goto pi_retry;
3072
3073 default:
3074 WARN_ON_ONCE(1);
3075 goto out_putkey;
3076 }
3077 }
3078
3079 /*
3080 * If uval has changed, let user space handle it.
3081 */
3082 ret = (curval == uval) ? 0 : -EAGAIN;
3083
3084 out_unlock:
3085 spin_unlock(&hb->lock);
3086 out_putkey:
3087 return ret;
3088
3089 pi_retry:
3090 cond_resched();
3091 goto retry;
3092
3093 pi_faulted:
3094
3095 ret = fault_in_user_writeable(uaddr);
3096 if (!ret)
3097 goto retry;
3098
3099 return ret;
3100 }
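
/*
* Illustrative sketch, not kernel code: the matching user space unlock only
* enters futex_unlock_pi() when its "TID -> 0" transition fails, i.e. when
* FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set. Assumed fast path:
*
*	#include <linux/futex.h>
*	#include <stdatomic.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	static void pi_unlock_example(_Atomic unsigned int *futex, unsigned int tid)
*	{
*		unsigned int expected = tid;
*
*		// Uncontended: no kernel state exists, just clear our TID.
*		if (atomic_compare_exchange_strong(futex, &expected, 0))
*			return;
*		// Contended: hand the lock to the top waiter via the kernel.
*		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
*	}
*/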
3101
3102 /**
3103 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3104 * @hb: the hash_bucket futex_q was originally enqueued on
3105 * @q: the futex_q woken while waiting to be requeued
3106 * @key2: the futex_key of the requeue target futex
3107 * @timeout: the timeout associated with the wait (NULL if none)
3108 *
3109 * Detect if the task was woken on the initial futex as opposed to the requeue
3110 * target futex. If so, determine if it was a timeout or a signal that caused
3111 * the wakeup and return the appropriate error code to the caller. Must be
3112 * called with the hb lock held.
3113 *
3114 * Return:
3115 * - 0 = no early wakeup detected;
3116 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3117 */
3118 static inline
3119 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3120 struct futex_q *q, union futex_key *key2,
3121 struct hrtimer_sleeper *timeout)
3122 {
3123 int ret = 0;
3124
3125 /*
3126 * With the hb lock held, we avoid races while we process the wakeup.
3127 * We only need to hold hb (and not hb2) to ensure atomicity as the
3128 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3129 * It can't be requeued from uaddr2 to something else since we don't
3130 * support a PI aware source futex for requeue.
3131 */
3132 if (!match_futex(&q->key, key2)) {
3133 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3134 /*
3135 * We were woken prior to requeue by a timeout or a signal.
3136 * Unqueue the futex_q and determine which it was.
3137 */
3138 plist_del(&q->list, &hb->chain);
3139 hb_waiters_dec(hb);
3140
3141 /* Handle spurious wakeups gracefully */
3142 ret = -EWOULDBLOCK;
3143 if (timeout && !timeout->task)
3144 ret = -ETIMEDOUT;
3145 else if (signal_pending(current))
3146 ret = -ERESTARTNOINTR;
3147 }
3148 return ret;
3149 }
3150
3151 /**
3152 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3153 * @uaddr: the futex we initially wait on (non-pi)
3154 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3155 * the same type, no requeueing from private to shared, etc.
3156 * @val: the expected value of uaddr
3157 * @abs_time: absolute timeout
3158 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
3159 * @uaddr2: the pi futex we will take prior to returning to user-space
3160 *
3161 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3162 * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will wake
3163 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3164 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3165 * without one, the pi logic would not know which task to boost/deboost, if
3166 * there was a need to.
3167 *
3168 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3169 * via the following--
3170 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3171 * 2) wakeup on uaddr2 after a requeue
3172 * 3) signal
3173 * 4) timeout
3174 *
3175 * If 3, cleanup and return -ERESTARTNOINTR.
3176 *
3177 * If 2, we may then block on trying to take the rt_mutex and return via:
3178 * 5) successful lock
3179 * 6) signal
3180 * 7) timeout
3181 * 8) other lock acquisition failure
3182 *
3183 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3184 *
3185 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3186 *
3187 * Return:
3188 * - 0 - On success;
3189 * - <0 - On error
3190 */
3191 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3192 u32 val, ktime_t *abs_time, u32 bitset,
3193 u32 __user *uaddr2)
3194 {
3195 struct hrtimer_sleeper timeout, *to;
3196 struct rt_mutex_waiter rt_waiter;
3197 struct futex_hash_bucket *hb;
3198 union futex_key key2 = FUTEX_KEY_INIT;
3199 struct futex_q q = futex_q_init;
3200 int res, ret;
3201
3202 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3203 return -ENOSYS;
3204
3205 if (uaddr == uaddr2)
3206 return -EINVAL;
3207
3208 if (!bitset)
3209 return -EINVAL;
3210
3211 to = futex_setup_timer(abs_time, &timeout, flags,
3212 current->timer_slack_ns);
3213
3214 /*
3215 * The waiter is allocated on our stack, manipulated by the requeue
3216 * code while we sleep on uaddr.
3217 */
3218 rt_mutex_init_waiter(&rt_waiter);
3219
3220 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3221 if (unlikely(ret != 0))
3222 goto out;
3223
3224 q.bitset = bitset;
3225 q.rt_waiter = &rt_waiter;
3226 q.requeue_pi_key = &key2;
3227
3228 /*
3229 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3230 * count.
3231 */
3232 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3233 if (ret)
3234 goto out;
3235
3236 /*
3237 * The check above which compares uaddrs is not sufficient for
3238 * shared futexes. We need to compare the keys:
3239 */
3240 if (match_futex(&q.key, &key2)) {
3241 queue_unlock(hb);
3242 ret = -EINVAL;
3243 goto out;
3244 }
3245
3246 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3247 futex_wait_queue_me(hb, &q, to);
3248
3249 spin_lock(&hb->lock);
3250 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3251 spin_unlock(&hb->lock);
3252 if (ret)
3253 goto out;
3254
3255 /*
3256 * In order for us to be here, we know our q.key == key2, and since
3257 * we took the hb->lock above, we also know that futex_requeue() has
3258 * completed and we no longer have to concern ourselves with a wakeup
3259 * race with the atomic proxy lock acquisition by the requeue code. The
3260 * futex_requeue dropped our key1 reference and incremented our key2
3261 * reference count.
3262 */
3263
3264 /* Check if the requeue code acquired the second futex for us. */
3265 if (!q.rt_waiter) {
3266 /*
3267 * Got the lock. We might not be the anticipated owner if we
3268 * did a lock-steal - fix up the PI-state in that case.
3269 */
3270 if (q.pi_state && (q.pi_state->owner != current)) {
3271 spin_lock(q.lock_ptr);
3272 ret = fixup_pi_state_owner(uaddr2, &q, current);
3273 /*
3274 * Drop the reference to the pi state which
3275 * the requeue_pi() code acquired for us.
3276 */
3277 put_pi_state(q.pi_state);
3278 spin_unlock(q.lock_ptr);
3279 /*
3280 * Adjust the return value. It's either -EFAULT or
3281 * success (1) but the caller expects 0 for success.
3282 */
3283 ret = ret < 0 ? ret : 0;
3284 }
3285 } else {
3286 struct rt_mutex *pi_mutex;
3287
3288 /*
3289 * We have been woken up by futex_unlock_pi(), a timeout, or a
3290 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3291 * the pi_state.
3292 */
3293 WARN_ON(!q.pi_state);
3294 pi_mutex = &q.pi_state->pi_mutex;
3295 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3296
3297 spin_lock(q.lock_ptr);
3298 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3299 ret = 0;
3300
3301 debug_rt_mutex_free_waiter(&rt_waiter);
3302 /*
3303 * Fixup the pi_state owner and possibly acquire the lock if we
3304 * haven't already.
3305 */
3306 res = fixup_owner(uaddr2, &q, !ret);
3307 /*
3308 * If fixup_owner() returned an error, propagate that. If it
3309 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3310 */
3311 if (res)
3312 ret = (res < 0) ? res : 0;
3313
3314 /* Unqueue and drop the lock. */
3315 unqueue_me_pi(&q);
3316 }
3317
3318 if (ret == -EINTR) {
3319 /*
3320 * We've already been requeued, but cannot restart by calling
3321 * futex_lock_pi() directly. We could restart this syscall, but
3322 * it would detect that the user space "val" changed and return
3323 * -EWOULDBLOCK. Save the overhead of the restart and return
3324 * -EWOULDBLOCK directly.
3325 */
3326 ret = -EWOULDBLOCK;
3327 }
3328
3329 out:
3330 if (to) {
3331 hrtimer_cancel(&to->timer);
3332 destroy_hrtimer_on_stack(&to->timer);
3333 }
3334 return ret;
3335 }
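
/*
* Illustrative sketch, not kernel code: FUTEX_WAIT_REQUEUE_PI pairs with
* FUTEX_CMP_REQUEUE_PI to build a condvar whose mutex is a PI futex. The
* rough shape, heavily simplified and with names made up for the example:
*
*	#include <limits.h>
*	#include <linux/futex.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	// Waiter: called with the PI mutex held; when the syscall returns
*	// successfully the kernel has already re-acquired the mutex for us.
*	static void cond_wait_example(unsigned int *cond, unsigned int *mutex,
*				      unsigned int cond_val)
*	{
*		syscall(SYS_futex, mutex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
*		// val3 (the bitset) is ignored for this op.
*		syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, cond_val,
*			NULL, mutex, 0);
*	}
*
*	// Broadcaster: wake one waiter, requeue the rest onto the PI mutex.
*	static void cond_broadcast_example(unsigned int *cond, unsigned int *mutex,
*					   unsigned int cond_val)
*	{
*		syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
*			(unsigned long)INT_MAX, mutex, cond_val);
*	}
*/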
3336
3337 /*
3338 * Support for robust futexes: the kernel cleans up held futexes at
3339 * thread exit time.
3340 *
3341 * Implementation: user-space maintains a per-thread list of locks it
3342 * is holding. Upon do_exit(), the kernel carefully walks this list,
3343 * and marks all locks that are owned by this thread with the
3344 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3345 * always manipulated with the lock held, so the list is private and
3346 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3347 * field, to allow the kernel to clean up if the thread dies after
3348 * acquiring the lock, but just before it could have added itself to
3349 * the list. There can only be one such pending lock.
3350 */
3351
3352 /**
3353 * sys_set_robust_list() - Set the robust-futex list head of a task
3354 * @head: pointer to the list-head
3355 * @len: length of the list-head, as userspace expects
3356 */
3357 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3358 size_t, len)
3359 {
3360 if (!futex_cmpxchg_enabled)
3361 return -ENOSYS;
3362 /*
3363 * The kernel knows only one size for now:
3364 */
3365 if (unlikely(len != sizeof(*head)))
3366 return -EINVAL;
3367
3368 current->robust_list = head;
3369
3370 return 0;
3371 }
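
/*
* Illustrative sketch, not kernel code: user space (normally the C library)
* registers one robust_list_head per thread, typically at thread start. The
* helper name is made up; struct robust_list_head comes from
* include/uapi/linux/futex.h:
*
*	#include <linux/futex.h>
*	#include <stddef.h>
*	#include <sys/syscall.h>
*	#include <unistd.h>
*
*	static __thread struct robust_list_head robust_head;
*
*	static long register_robust_list_example(void)
*	{
*		robust_head.list.next = &robust_head.list;	// empty list
*		robust_head.futex_offset = 0;	// lock word offset in an entry
*		robust_head.list_op_pending = NULL;
*		return syscall(SYS_set_robust_list, &robust_head,
*			       sizeof(robust_head));
*	}
*/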
3372
3373 /**
3374 * sys_get_robust_list() - Get the robust-futex list head of a task
3375 * @pid: pid of the process [zero for current task]
3376 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3377 * @len_ptr: pointer to a length field, the kernel fills in the header size
3378 */
3379 SYSCALL_DEFINE3(get_robust_list, int, pid,
3380 struct robust_list_head __user * __user *, head_ptr,
3381 size_t __user *, len_ptr)
3382 {
3383 struct robust_list_head __user *head;
3384 unsigned long ret;
3385 struct task_struct *p;
3386
3387 if (!futex_cmpxchg_enabled)
3388 return -ENOSYS;
3389
3390 rcu_read_lock();
3391
3392 ret = -ESRCH;
3393 if (!pid)
3394 p = current;
3395 else {
3396 p = find_task_by_vpid(pid);
3397 if (!p)
3398 goto err_unlock;
3399 }
3400
3401 ret = -EPERM;
3402 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3403 goto err_unlock;
3404
3405 head = p->robust_list;
3406 rcu_read_unlock();
3407
3408 if (put_user(sizeof(*head), len_ptr))
3409 return -EFAULT;
3410 return put_user(head, head_ptr);
3411
3412 err_unlock:
3413 rcu_read_unlock();
3414
3415 return ret;
3416 }
3417
3418 /* Constants for the pending_op argument of handle_futex_death */
3419 #define HANDLE_DEATH_PENDING true
3420 #define HANDLE_DEATH_LIST false
3421
3422 /*
3423 * Process a futex-list entry, check whether it's owned by the
3424 * dying task, and do notification if so:
3425 */
3426 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
3427 bool pi, bool pending_op)
3428 {
3429 u32 uval, nval, mval;
3430 pid_t owner;
3431 int err;
3432
3433 /* Futex address must be 32bit aligned */
3434 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3435 return -1;
3436
3437 retry:
3438 if (get_user(uval, uaddr))
3439 return -1;
3440
3441 /*
3442 * Special case for regular (non PI) futexes. The unlock path in
3443 * user space has two race scenarios:
3444 *
3445 * 1. The unlock path releases the user space futex value and
3446 * before it can execute the futex() syscall to wake up
3447 * waiters it is killed.
3448 *
3449 * 2. A woken up waiter is killed before it can acquire the
3450 * futex in user space.
3451 *
3452 * In the second case, the wake up notification could be generated
3453 * by the unlock path in user space after setting the futex value
3454 * to zero or by the kernel after setting the OWNER_DIED bit below.
3455 *
3456 * In both cases the TID validation below prevents a wakeup of
3457 * potential waiters which can cause these waiters to block
3458 * forever.
3459 *
3460 * In both cases the following conditions are met:
3461 *
3462 * 1) task->robust_list->list_op_pending != NULL
3463 * @pending_op == true
3464 * 2) The owner part of user space futex value == 0
3465 * 3) Regular futex: @pi == false
3466 *
3467 * If these conditions are met, it is safe to attempt waking up a
3468 * potential waiter without touching the user space futex value and
3469 * trying to set the OWNER_DIED bit. If the futex value is zero,
3470 * the rest of the user space mutex state is consistent, so a woken
3471 * waiter will just take over the uncontended futex. Setting the
3472 * OWNER_DIED bit would create inconsistent state and malfunction
3473 * of the user space owner died handling. Otherwise, the OWNER_DIED
3474 * bit is already set, and the woken waiter is expected to deal with
3475 * this.
3476 */
3477 owner = uval & FUTEX_TID_MASK;
3478
3479 if (pending_op && !pi && !owner) {
3480 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3481 return 0;
3482 }
3483
3484 if (owner != task_pid_vnr(curr))
3485 return 0;
3486
3487 /*
3488 * Ok, this dying thread is truly holding a futex
3489 * of interest. Set the OWNER_DIED bit atomically
3490 * via cmpxchg, and if the value had FUTEX_WAITERS
3491 * set, wake up a waiter (if any). (We have to do a
3492 * futex_wake() even if OWNER_DIED is already set -
3493 * to handle the rare but possible case of recursive
3494 * thread-death.) The rest of the cleanup is done in
3495 * userspace.
3496 */
3497 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3498
3499 /*
3500 * We are not holding a lock here, but we want to have
3501 * the pagefault_disable/enable() protection because
3502 * we want to handle the fault gracefully. If the
3503 * access fails we try to fault in the futex with R/W
3504 * verification via get_user_pages. get_user() above
3505 * does not guarantee R/W access. If that fails we
3506 * give up and leave the futex locked.
3507 */
3508 if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3509 switch (err) {
3510 case -EFAULT:
3511 if (fault_in_user_writeable(uaddr))
3512 return -1;
3513 goto retry;
3514
3515 case -EAGAIN:
3516 cond_resched();
3517 goto retry;
3518
3519 default:
3520 WARN_ON_ONCE(1);
3521 return err;
3522 }
3523 }
3524
3525 if (nval != uval)
3526 goto retry;
3527
3528 /*
3529 * Wake robust non-PI futexes here. The wakeup of
3530 * PI futexes happens in exit_pi_state():
3531 */
3532 if (!pi && (uval & FUTEX_WAITERS))
3533 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3534
3535 return 0;
3536 }
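
/*
* Illustrative sketch, not kernel code: a waiter on a robust, non-PI futex
* that is woken by the cleanup above finds FUTEX_OWNER_DIED set in the
* value and is expected to take the lock over, the way robust pthread
* mutexes report EOWNERDEAD. Roughly (names and convention are assumed):
*
*	unsigned int val = atomic_load(futex);
*
*	if (val & FUTEX_OWNER_DIED) {
*		// Keep OWNER_DIED set so the application knows it has to
*		// make the protected state consistent again.
*		unsigned int newval = FUTEX_OWNER_DIED | gettid();
*
*		if (atomic_compare_exchange_strong(futex, &val, newval))
*			return EOWNERDEAD;
*	}
*
* The real recovery protocol is the C library's business; this only shows
* where the OWNER_DIED bit set above enters the picture.
*/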
3537
3538 /*
3539 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3540 */
3541 static inline int fetch_robust_entry(struct robust_list __user **entry,
3542 struct robust_list __user * __user *head,
3543 unsigned int *pi)
3544 {
3545 unsigned long uentry;
3546
3547 if (get_user(uentry, (unsigned long __user *)head))
3548 return -EFAULT;
3549
3550 *entry = (void __user *)(uentry & ~1UL);
3551 *pi = uentry & 1;
3552
3553 return 0;
3554 }
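/*
 * Userspace encodes the PI property by setting bit 0 of the pointers it
 * stores into the robust list, which works because the entries are at
 * least word aligned. A minimal sketch of the encoding side (hypothetical
 * helper, not part of the UAPI headers):
 *
 *	static void set_list_op_pending(struct robust_list_head *head,
 *					struct robust_list *entry, int is_pi)
 *	{
 *		unsigned long val = (unsigned long)entry | (is_pi ? 1UL : 0UL);
 *
 *		head->list_op_pending = (struct robust_list *)val;
 *	}
 */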
3555
3556 /*
3557 * Walk curr->robust_list (very carefully, it's a userspace list!)
3558 * and mark any locks found there dead, and notify any waiters.
3559 *
3560 * We silently return on any sign of list-walking problem.
3561 */
3562 static void exit_robust_list(struct task_struct *curr)
3563 {
3564 struct robust_list_head __user *head = curr->robust_list;
3565 struct robust_list __user *entry, *next_entry, *pending;
3566 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3567 unsigned int next_pi;
3568 unsigned long futex_offset;
3569 int rc;
3570
3571 if (!futex_cmpxchg_enabled)
3572 return;
3573
3574 /*
3575 * Fetch the list head (which was registered earlier, via
3576 * sys_set_robust_list()):
3577 */
3578 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3579 return;
3580 /*
3581 * Fetch the relative futex offset:
3582 */
3583 if (get_user(futex_offset, &head->futex_offset))
3584 return;
3585 /*
3586 * Fetch any possibly pending lock-add first, and handle it
3587 * if it exists:
3588 */
3589 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3590 return;
3591
3592 next_entry = NULL; /* avoid warning with gcc */
3593 while (entry != &head->list) {
3594 /*
3595 * Fetch the next entry in the list before calling
3596 * handle_futex_death:
3597 */
3598 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3599 /*
3600 * A pending lock might already be on the list, so
3601 * don't process it twice:
3602 */
3603 if (entry != pending) {
3604 if (handle_futex_death((void __user *)entry + futex_offset,
3605 curr, pi, HANDLE_DEATH_LIST))
3606 return;
3607 }
3608 if (rc)
3609 return;
3610 entry = next_entry;
3611 pi = next_pi;
3612 /*
3613 * Avoid excessively long or circular lists:
3614 */
3615 if (!--limit)
3616 break;
3617
3618 cond_resched();
3619 }
3620
3621 if (pending) {
3622 handle_futex_death((void __user *)pending + futex_offset,
3623 curr, pip, HANDLE_DEATH_PENDING);
3624 }
3625 }
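/*
 * For reference, the userspace objects walked above, as declared in the
 * UAPI <linux/futex.h> (simplified, __user annotations omitted):
 *
 *	struct robust_list {
 *		struct robust_list *next;
 *	};
 *
 *	struct robust_list_head {
 *		struct robust_list list;
 *		long futex_offset;
 *		struct robust_list *list_op_pending;
 *	};
 *
 * Every held robust lock embeds a struct robust_list which userspace
 * links into a singly linked list anchored at the head registered via
 * sys_set_robust_list(); the list is empty when list.next points back at
 * &head->list. The futex word of each lock sits futex_offset bytes from
 * its list entry, which is why the loop above terminates on
 * entry == &head->list and passes (void __user *)entry + futex_offset to
 * handle_futex_death().
 */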
3626
3627 static void futex_cleanup(struct task_struct *tsk)
3628 {
3629 if (unlikely(tsk->robust_list)) {
3630 exit_robust_list(tsk);
3631 tsk->robust_list = NULL;
3632 }
3633
3634 #ifdef CONFIG_COMPAT
3635 if (unlikely(tsk->compat_robust_list)) {
3636 compat_exit_robust_list(tsk);
3637 tsk->compat_robust_list = NULL;
3638 }
3639 #endif
3640
3641 if (unlikely(!list_empty(&tsk->pi_state_list)))
3642 exit_pi_state_list(tsk);
3643 }
3644
3645 /**
3646 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
3647 * @tsk: task to set the state on
3648 *
3649 * Set the futex exit state of the task locklessly. The futex waiter code
3650 * observes that state when a task is exiting and loops until the task has
3651 * actually finished the futex cleanup. The worst case for this is that the
3652 * waiter runs through the wait loop until the state becomes visible.
3653 *
3654 * This is called from the recursive fault handling path in do_exit().
3655 *
3656 * This is best effort. Either the futex exit code has run already or
3657 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
3658 * take it over. If not, the problem is pushed back to user space. If the
3659 * futex exit code did not run yet, then an already queued waiter might
3660 * block forever, but there is nothing which can be done about that.
3661 */
3662 void futex_exit_recursive(struct task_struct *tsk)
3663 {
3664 /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
3665 if (tsk->futex_state == FUTEX_STATE_EXITING)
3666 mutex_unlock(&tsk->futex_exit_mutex);
3667 tsk->futex_state = FUTEX_STATE_DEAD;
3668 }
3669
3670 static void futex_cleanup_begin(struct task_struct *tsk)
3671 {
3672 /*
3673 * Prevent various race issues against a concurrent incoming waiter
3674 * including live locks by forcing the waiter to block on
3675 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
3676 * attach_to_pi_owner().
3677 */
3678 mutex_lock(&tsk->futex_exit_mutex);
3679
3680 /*
3681 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
3682 *
3683 * This ensures that all subsequent checks of tsk->futex_state in
3684 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
3685 * tsk->pi_lock held.
3686 *
3687 * It also guarantees that a pi_state which was queued right before
3688 * the state change under tsk->pi_lock by a concurrent waiter must
3689 * be observed in exit_pi_state_list().
3690 */
3691 raw_spin_lock_irq(&tsk->pi_lock);
3692 tsk->futex_state = FUTEX_STATE_EXITING;
3693 raw_spin_unlock_irq(&tsk->pi_lock);
3694 }
3695
3696 static void futex_cleanup_end(struct task_struct *tsk, int state)
3697 {
3698 /*
3699 * Lockless store. The only side effect is that an observer might
3700 * take another loop until it becomes visible.
3701 */
3702 tsk->futex_state = state;
3703 /*
3704 * Drop the exit protection. This unblocks waiters which observed
3705 * FUTEX_STATE_EXITING to reevaluate the state.
3706 */
3707 mutex_unlock(&tsk->futex_exit_mutex);
3708 }
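/*
 * The exit state machine above pairs with the waiter side roughly as
 * follows (a condensed sketch of the attach_to_pi_owner() handshake
 * described in futex_cleanup_begin(), not literal code):
 *
 *	exiting owner				PI waiter
 *	-------------				---------
 *	mutex_lock(&tsk->futex_exit_mutex);
 *	tsk->futex_state = FUTEX_STATE_EXITING;
 *						sees FUTEX_STATE_EXITING,
 *						drops its locks and blocks on
 *						mutex_lock(&tsk->futex_exit_mutex);
 *	futex_cleanup(tsk);
 *	tsk->futex_state = FUTEX_STATE_DEAD;
 *	mutex_unlock(&tsk->futex_exit_mutex);
 *						unblocks, retries and now finds
 *						either a new owner or OWNER_DIED
 *
 * Blocking on the mutex rather than spinning on tsk->futex_state is what
 * avoids the live lock mentioned in futex_cleanup_begin().
 */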
3709
3710 void futex_exec_release(struct task_struct *tsk)
3711 {
3712 /*
3713 * The state handling is done for consistency, but in the case of
3714 * exec() there is no way to prevent further damage as the PID stays
3715 * the same. But for the unlikely and arguably buggy case that a
3716 * futex is held across exec(), this provides at least as much state
3717 * consistency protection as is possible.
3718 */
3719 futex_cleanup_begin(tsk);
3720 futex_cleanup(tsk);
3721 /*
3722 * Reset the state to FUTEX_STATE_OK. The task is alive and about
3723 * to exec a new binary.
3724 */
3725 futex_cleanup_end(tsk, FUTEX_STATE_OK);
3726 }
3727
3728 void futex_exit_release(struct task_struct *tsk)
3729 {
3730 futex_cleanup_begin(tsk);
3731 futex_cleanup(tsk);
3732 futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3733 }
3734
3735 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3736 u32 __user *uaddr2, u32 val2, u32 val3)
3737 {
3738 int cmd = op & FUTEX_CMD_MASK;
3739 unsigned int flags = 0;
3740
3741 if (!(op & FUTEX_PRIVATE_FLAG))
3742 flags |= FLAGS_SHARED;
3743
3744 if (op & FUTEX_CLOCK_REALTIME) {
3745 flags |= FLAGS_CLOCKRT;
3746 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
3747 return -ENOSYS;
3748 }
3749
3750 switch (cmd) {
3751 case FUTEX_LOCK_PI:
3752 case FUTEX_UNLOCK_PI:
3753 case FUTEX_TRYLOCK_PI:
3754 case FUTEX_WAIT_REQUEUE_PI:
3755 case FUTEX_CMP_REQUEUE_PI:
3756 if (!futex_cmpxchg_enabled)
3757 return -ENOSYS;
3758 }
3759
3760 trace_android_vh_do_futex(cmd, &flags, uaddr2);
3761 switch (cmd) {
3762 case FUTEX_WAIT:
3763 val3 = FUTEX_BITSET_MATCH_ANY;
3764 fallthrough;
3765 case FUTEX_WAIT_BITSET:
3766 return futex_wait(uaddr, flags, val, timeout, val3);
3767 case FUTEX_WAKE:
3768 val3 = FUTEX_BITSET_MATCH_ANY;
3769 fallthrough;
3770 case FUTEX_WAKE_BITSET:
3771 return futex_wake(uaddr, flags, val, val3);
3772 case FUTEX_REQUEUE:
3773 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3774 case FUTEX_CMP_REQUEUE:
3775 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3776 case FUTEX_WAKE_OP:
3777 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3778 case FUTEX_LOCK_PI:
3779 return futex_lock_pi(uaddr, flags, timeout, 0);
3780 case FUTEX_UNLOCK_PI:
3781 return futex_unlock_pi(uaddr, flags);
3782 case FUTEX_TRYLOCK_PI:
3783 return futex_lock_pi(uaddr, flags, NULL, 1);
3784 case FUTEX_WAIT_REQUEUE_PI:
3785 val3 = FUTEX_BITSET_MATCH_ANY;
3786 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3787 uaddr2);
3788 case FUTEX_CMP_REQUEUE_PI:
3789 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3790 }
3791 return -ENOSYS;
3792 }
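/*
 * Minimal userspace sketch of the two most common commands multiplexed
 * above; glibc has no futex() wrapper, so the raw syscall is used
 * (illustrative only, the helper names are not part of any ABI):
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static long user_futex_wait(uint32_t *uaddr, uint32_t expected,
 *				    const struct timespec *timeout)
 *	{
 *		// Blocks only while *uaddr still contains @expected.
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE,
 *			       expected, timeout, NULL, 0);
 *	}
 *
 *	static long user_futex_wake(uint32_t *uaddr, int nr_wake)
 *	{
 *		return syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE,
 *			       nr_wake, NULL, NULL, 0);
 *	}
 *
 * FUTEX_WAIT_PRIVATE is FUTEX_WAIT | FUTEX_PRIVATE_FLAG, i.e. the
 * FLAGS_SHARED path above is skipped for process-private futexes.
 */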
3793
3794
3795 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3796 struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3797 u32, val3)
3798 {
3799 struct timespec64 ts;
3800 ktime_t t, *tp = NULL;
3801 u32 val2 = 0;
3802 int cmd = op & FUTEX_CMD_MASK;
3803
3804 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3805 cmd == FUTEX_WAIT_BITSET ||
3806 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3807 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3808 return -EFAULT;
3809 if (get_timespec64(&ts, utime))
3810 return -EFAULT;
3811 if (!timespec64_valid(&ts))
3812 return -EINVAL;
3813
3814 t = timespec64_to_ktime(ts);
3815 if (cmd == FUTEX_WAIT)
3816 t = ktime_add_safe(ktime_get(), t);
3817 else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
3818 t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
3819 tp = &t;
3820 }
3821 /*
3822 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3823 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3824 */
3825 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3826 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3827 val2 = (u32) (unsigned long) utime;
3828
3829 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3830 }
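/*
 * Example of the 'utime' reuse documented above: for the requeue and
 * wake-op commands the fourth syscall argument is not a timespec pointer
 * but the second counter, passed through the pointer bits, e.g. "wake one
 * waiter on the condvar futex, requeue the rest onto the mutex futex"
 * (hypothetical variables):
 *
 *	syscall(SYS_futex, &cond->futex, FUTEX_CMP_REQUEUE_PRIVATE,
 *		1,				// val:   wake at most one
 *		(void *)(unsigned long)INT_MAX,	// utime: really val2, requeue limit
 *		&mutex->futex,			// uaddr2
 *		cond_val);			// val3:  expected *uaddr value
 */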
3831
3832 #ifdef CONFIG_COMPAT
3833 /*
3834 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3835 */
3836 static inline int
3837 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3838 compat_uptr_t __user *head, unsigned int *pi)
3839 {
3840 if (get_user(*uentry, head))
3841 return -EFAULT;
3842
3843 *entry = compat_ptr((*uentry) & ~1);
3844 *pi = (unsigned int)(*uentry) & 1;
3845
3846 return 0;
3847 }
3848
3849 static void __user *futex_uaddr(struct robust_list __user *entry,
3850 compat_long_t futex_offset)
3851 {
3852 compat_uptr_t base = ptr_to_compat(entry);
3853 void __user *uaddr = compat_ptr(base + futex_offset);
3854
3855 return uaddr;
3856 }
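/*
 * Doing the addition on the compat_uptr_t rather than on the 64-bit
 * kernel pointer reproduces the 32-bit task's own pointer arithmetic,
 * including sign extension and wrap-around of the compat_long_t offset.
 * E.g. (hypothetical values) entry == 0xfffff000 with futex_offset ==
 * 0x2000 yields 0x00001000, and futex_offset == -16 on entry ==
 * 0x00001000 yields 0x00000ff0, exactly what the compat task computed.
 */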
3857
3858 /*
3859 * Walk curr->robust_list (very carefully, it's a userspace list!)
3860 * and mark any locks found there dead, and notify any waiters.
3861 *
3862 * We silently return on any sign of list-walking problem.
3863 */
3864 static void compat_exit_robust_list(struct task_struct *curr)
3865 {
3866 struct compat_robust_list_head __user *head = curr->compat_robust_list;
3867 struct robust_list __user *entry, *next_entry, *pending;
3868 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3869 unsigned int next_pi;
3870 compat_uptr_t uentry, next_uentry, upending;
3871 compat_long_t futex_offset;
3872 int rc;
3873
3874 if (!futex_cmpxchg_enabled)
3875 return;
3876
3877 /*
3878 * Fetch the list head (which was registered earlier, via
3879 * sys_set_robust_list()):
3880 */
3881 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
3882 return;
3883 /*
3884 * Fetch the relative futex offset:
3885 */
3886 if (get_user(futex_offset, &head->futex_offset))
3887 return;
3888 /*
3889 * Fetch any possibly pending lock-add first, and handle it
3890 * if it exists:
3891 */
3892 if (compat_fetch_robust_entry(&upending, &pending,
3893 &head->list_op_pending, &pip))
3894 return;
3895
3896 next_entry = NULL; /* avoid warning with gcc */
3897 while (entry != (struct robust_list __user *) &head->list) {
3898 /*
3899 * Fetch the next entry in the list before calling
3900 * handle_futex_death:
3901 */
3902 rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
3903 (compat_uptr_t __user *)&entry->next, &next_pi);
3904 /*
3905 * A pending lock might already be on the list, so
3906 * don't process it twice:
3907 */
3908 if (entry != pending) {
3909 void __user *uaddr = futex_uaddr(entry, futex_offset);
3910
3911 if (handle_futex_death(uaddr, curr, pi,
3912 HANDLE_DEATH_LIST))
3913 return;
3914 }
3915 if (rc)
3916 return;
3917 uentry = next_uentry;
3918 entry = next_entry;
3919 pi = next_pi;
3920 /*
3921 * Avoid excessively long or circular lists:
3922 */
3923 if (!--limit)
3924 break;
3925
3926 cond_resched();
3927 }
3928 if (pending) {
3929 void __user *uaddr = futex_uaddr(pending, futex_offset);
3930
3931 handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
3932 }
3933 }
3934
3935 COMPAT_SYSCALL_DEFINE2(set_robust_list,
3936 struct compat_robust_list_head __user *, head,
3937 compat_size_t, len)
3938 {
3939 if (!futex_cmpxchg_enabled)
3940 return -ENOSYS;
3941
3942 if (unlikely(len != sizeof(*head)))
3943 return -EINVAL;
3944
3945 current->compat_robust_list = head;
3946
3947 return 0;
3948 }
3949
3950 COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
3951 compat_uptr_t __user *, head_ptr,
3952 compat_size_t __user *, len_ptr)
3953 {
3954 struct compat_robust_list_head __user *head;
3955 unsigned long ret;
3956 struct task_struct *p;
3957
3958 if (!futex_cmpxchg_enabled)
3959 return -ENOSYS;
3960
3961 rcu_read_lock();
3962
3963 ret = -ESRCH;
3964 if (!pid)
3965 p = current;
3966 else {
3967 p = find_task_by_vpid(pid);
3968 if (!p)
3969 goto err_unlock;
3970 }
3971
3972 ret = -EPERM;
3973 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3974 goto err_unlock;
3975
3976 head = p->compat_robust_list;
3977 rcu_read_unlock();
3978
3979 if (put_user(sizeof(*head), len_ptr))
3980 return -EFAULT;
3981 return put_user(ptr_to_compat(head), head_ptr);
3982
3983 err_unlock:
3984 rcu_read_unlock();
3985
3986 return ret;
3987 }
3988 #endif /* CONFIG_COMPAT */
3989
3990 #ifdef CONFIG_COMPAT_32BIT_TIME
3991 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
3992 struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
3993 u32, val3)
3994 {
3995 struct timespec64 ts;
3996 ktime_t t, *tp = NULL;
3997 int val2 = 0;
3998 int cmd = op & FUTEX_CMD_MASK;
3999
4000 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
4001 cmd == FUTEX_WAIT_BITSET ||
4002 cmd == FUTEX_WAIT_REQUEUE_PI)) {
4003 if (get_old_timespec32(&ts, utime))
4004 return -EFAULT;
4005 if (!timespec64_valid(&ts))
4006 return -EINVAL;
4007
4008 t = timespec64_to_ktime(ts);
4009 if (cmd == FUTEX_WAIT)
4010 t = ktime_add_safe(ktime_get(), t);
4011 else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
4012 t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
4013 tp = &t;
4014 }
4015 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
4016 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
4017 val2 = (int) (unsigned long) utime;
4018
4019 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
4020 }
4021 #endif /* CONFIG_COMPAT_32BIT_TIME */
4022
4023 static void __init futex_detect_cmpxchg(void)
4024 {
4025 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
4026 u32 curval;
4027
4028 /*
4029 * This will fail and we want it. Some arch implementations do
4030 * runtime detection of the futex_atomic_cmpxchg_inatomic()
4031 * functionality. We want to know that before we call in any
4032 * of the complex code paths. Also we want to prevent
4033 * registration of robust lists in that case. NULL is
4034 * guaranteed to fault and we get -EFAULT on functional
4035 * implementation, the non-functional ones will return
4036 * -ENOSYS.
4037 */
4038 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
4039 futex_cmpxchg_enabled = 1;
4040 #endif
4041 }
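/*
 * In other words, the probe above boils down to (illustrative):
 *
 *	cmpxchg_futex_value_locked(&curval, NULL, 0, 0)
 *		== -EFAULT -> the arch really implements the atomic op;
 *			      futex_cmpxchg_enabled is set and the robust,
 *			      PI and requeue-PI commands are available
 *		== -ENOSYS -> stub implementation; futex_cmpxchg_enabled
 *			      stays 0, so the PI commands in do_futex() and
 *			      the robust list syscalls return -ENOSYS
 */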
4042
4043 static int __init futex_init(void)
4044 {
4045 unsigned int futex_shift;
4046 unsigned long i;
4047
4048 #if CONFIG_BASE_SMALL
4049 futex_hashsize = 16;
4050 #else
4051 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
4052 #endif
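	/*
	 * A worked example of the sizing above: with 6 possible CPUs this
	 * requests roundup_pow_of_two(256 * 6) = roundup_pow_of_two(1536)
	 * = 2048 hash buckets; alloc_large_system_hash() below reports the
	 * actual power-of-two size back via futex_shift.
	 */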
4053
4054 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
4055 futex_hashsize, 0,
4056 futex_hashsize < 256 ? HASH_SMALL : 0,
4057 &futex_shift, NULL,
4058 futex_hashsize, futex_hashsize);
4059 futex_hashsize = 1UL << futex_shift;
4060
4061 futex_detect_cmpxchg();
4062
4063 for (i = 0; i < futex_hashsize; i++) {
4064 atomic_set(&futex_queues[i].waiters, 0);
4065 plist_head_init(&futex_queues[i].chain);
4066 spin_lock_init(&futex_queues[i].lock);
4067 }
4068
4069 return 0;
4070 }
4071 core_initcall(futex_init);
4072